diff --git "a/267.jsonl" "b/267.jsonl" new file mode 100644--- /dev/null +++ "b/267.jsonl" @@ -0,0 +1,576 @@ +{"seq_id":"2421837227","text":"import re\r\nimport math\r\nimport time\r\nimport json\r\n\r\nkilled = open('killed.txt', 'r')\r\nmutants = open('mutants.log', 'r')\r\nkilled.readline()\r\ntemp = killed.readline()\r\ntemp2 = mutants.readline()\r\nwith open(\"killed.txt\") as f:\r\n for i, l in enumerate(f):\r\n pass\r\nlength = i + 1\r\ntoggle = True\r\nskip = False\r\nline = {}\r\ninfo = []\r\nfinal = ''\r\nfails = 0\r\nlive = 0\r\ntotalKill = 0\r\ntotalLive = 0\r\nprint(length)\r\nfinal = 'var data = {\"lines\": '\r\n\r\nx = 0\r\nwhile x < length:\r\n\tx += 1\r\n\tif toggle == True and x != length:\r\n\t\ttemp3 = temp2.split('@classify:')[1].split(':', 1)[0]\r\n\t\tif(temp2.split(':', 1)[0] == temp.split(',')[0]):\r\n\t\t\tif(temp.split(',')[1].rstrip() == 'FAIL'):\r\n\t\t\t\tfails += 1\r\n\t\t\telse:\r\n\t\t\t\tdiff = temp2.split('@classify:')[1].split(':', 1)[1].rstrip()\r\n\t\t\t\ttemp5 = '{\"id\":' + '\"' + temp.split(',')[0] + '\",' + '\"description\":' + '\"' + diff + '\"}'\r\n\t\t\t\tinfo.append(temp5)\r\n\t\t\t\tlive += 1\r\n\t\t\t\tx += -1\r\n\t\telse:\r\n\t\t\tskip = True\r\n\t\t\tx += -1\r\n\t\tif(skip == False):\r\n\t\t\ttemp = killed.readline()\r\n\t\t\ttemp2 = mutants.readline()\r\n\t\telse:\r\n\t\t\tx += -1\r\n\t\t\ttemp2 = mutants.readline()\r\n\t\t\tskip = False\r\n\telse:\r\n\t\ttemp6 = ''\r\n\t\tfor b,i in enumerate(info):\r\n\t\t\tif(b != len(info)-1):\r\n\t\t\t\ttemp6 += i + ','\r\n\t\t\telse:\r\n\t\t\t\ttemp6 += i\r\n\t\tif(fails == 0):\r\n\t\t\tpercent = 0\r\n\t\telif(live == 0):\r\n\t\t\tpercent = 1\r\n\t\telse:\r\n\t\t\tpercent = str(round(fails/(live+fails), 2))\r\n\t\tfinal += '[{\"id\": \"' + '%s' % temp3 + '\", \"percentage\": \"' + '%s' % percent + '\", \"live\": [' + '%s' % temp6 + ']}], \\n'\r\n\t\tinfo = []\r\n\t\ttemp3 = temp2.split('@classify:')[1].split(':', 1)[0]\r\n\t\tpercent = 0\r\n\t\ttotalKill += fails\r\n\t\ttotalLive += live\r\n\t\tfails = 0\r\n\t\tlive = 0\r\n\t\ttoggle = True\r\n\tif(temp2.split('@classify:')[1].split(':', 1)[0] != temp3):\r\n\t\ttoggle = False\r\n\telse:\r\n\t\ttoggle = True\r\n\r\nfinal = final[:-3] + '}'\r\ndata = open('data.js', 'w')\r\ndata.write(final)\r\ndata.close()\r\nif(totalKill == 0):\r\n\ttotalPercent = 0\r\nelif(totalLive == 0):\r\n\ttotalPercent = 1\r\nelse:\r\n\ttotalPercent = str(round(totalKill/(totalLive+totalKill), 2))\r\npercentages = open('percent.js', 'w')\r\npercentages.write('var totalpercent = {\"killedPercentage\": \"%s\", \"livePercentage\": \"%s\"}' % (totalPercent, str(round(1-float(totalPercent), 2))))\r\npercentages.close()","repo_name":"appu1232/Mutant-Analysis-parsing","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"32464572218","text":"from uk_gov_phe_erdst_sc import PayaraConfiguration\nfrom uk_gov_phe_erdst_sc import PayaraApplicationServer\nimport os\nimport urllib.request\n\n\ndef download_postgres_jar():\n urllib.request.urlretrieve('https://jdbc.postgresql.org/download/postgresql-42.2.20.jar', filename='postgresql-42.2.20.jar')\n\n\ndef make_domain():\n\n \"\"\" Setup the integration-tests Payara domain for testing \"\"\"\n\n config = PayaraConfiguration()\n if os.sep == '/':\n config.payara_root_folder = f\"{os.getenv('HOME')}{os.sep}awag{os.sep}payara5{os.sep}\"\n else:\n config.payara_root_folder = 
f'C:{os.sep}awag{os.sep}payara5{os.sep}'\n\n config.domain_name = 'prod'\n config.output_command_to_file = False\n config.execute_command = True\n config.output_to_console = True\n config.file_jdbc_jar_path = f'postgresql-42.2.20.jar'\n config.validate()\n\n payara_server = PayaraApplicationServer(config)\n payara_server.recreate()\n\n\ndownload_postgres_jar()\nmake_domain()\n","repo_name":"ukhsa-collaboration/animal-welfare-assessment-grid","sub_path":"prepare-build/github-001-setup-prod-domain.py","file_name":"github-001-setup-prod-domain.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"29"} +{"seq_id":"28082984551","text":"#!/usr/bin/env python3\n\nimport pya\n\n# filename is cmdline argument\ngdsfile = filename\noutname = filename.replace(\".gds\", \"_flat.gds\")\nlayout = pya.Layout()\nprune = True\nlevels = -1\n\n# Read each GDS files\nlayout.read(gdsfile)\n\n# Perform flatting\ntopcell = layout.top_cell().name\ntopcell_index = (layout.cell(topcell).cell_index())\nlayout.flatten(topcell_index, levels, prune)\nlayout.write(outname)\n","repo_name":"christoph-weiser/ic_tools","sub_path":"flatten/flatten.py","file_name":"flatten.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"44338310395","text":"#coding utf-8\n#Time : 2019-03-12 09:17\n\n# 格式化输出\nname='张三'\nage=22\nheight=170\n# print('python15期班有一个学生名叫'+name+',她今年'+str(age)+'岁'+',身高'+str(height))\n# print('python15期班有一个学生名叫%s,她今年%d岁,身高%s'%(name,age,height))\n# {}format\n# print('python15期班有一个学生名叫{},她今年{}岁,身高{}'.format(name,age,height))\n\n# 元组 tuple() 元组不可变,元组里面内容可以是任意类型\n# t=('hello',12,{'hello',12},(1,'python'))\n# print(t)\n# 单个取值\n# print(t[-1])\n# 切片取值\n# print(t[3][1][::-1])\n# 元组是不可变类型\n\n#列表 list []\nl=[1,2,'hello',(3,4),{'python,java'},[1,2,3]]\n\nprint(l[5][2])\n\n# 字典 dic{key:value}---字典是无序的,所以没有索引\nd={'name':'hf',\n'age':22,\n'height':'169'}\nprint(d)\nprint(d['name'][::-1])\n\n\n","repo_name":"hanfan0510/superme","sub_path":"week_1/class_0214/复习0214.py","file_name":"复习0214.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"5765975210","text":"import turtle\r\nwn = turtle.Screen()\r\nsnowflake = turtle.Turtle()\r\nsnowflake.speed(10)\r\nsnowflake.color(\"blue\")\r\nsnowflake.pensize(8)\r\n\r\nfor i in range(8):\r\n snowflake.forward(90)\r\n for i in range(3):\r\n snowflake.left(45)\r\n snowflake.forward(25)\r\n snowflake.backward(25)\r\n snowflake.right(90)\r\n snowflake.forward(25)\r\n snowflake.backward(25)\r\n snowflake.left(45)\r\n snowflake.backward(30)\r\n snowflake.right(45)\r\n\r\nwn.mainloop()\r\n \r\n \r\n","repo_name":"siasm6677/cti110","sub_path":"p4LAB1c_Sias.py","file_name":"p4LAB1c_Sias.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"19943975363","text":"\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport os\nimport preprocessor as tweet_p\nimport datetime\nimport nltk\nimport gc\nfrom sklearn.feature_extraction.text import strip_accents_unicode\nfrom sklearn.semi_supervised import LabelSpreading\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer, VectorizerMixin\nfrom sklearn.model_selection import train_test_split\n\nimport re\n\nprint(\"Initialized...\")\n\ndata_path 
= \"/home/juan/Desktop/Text_Mining/Om_Project/Data\"\ntables_path = \"/home/juan/Desktop/Text_Mining/Om_Project/colombia-elections-twitter/sentiment-analysis/tables\"\n\nwith open(os.path.join(tables_path,\"sentiment_labels\"), 'rb') as fp:\n sentiment_label = pickle.load(fp)\nfp.close()\n\nwith open(os.path.join(tables_path,\"tweet_id\"), 'rb') as fp:\n tweet_id = pickle.load(fp)\nfp.close()\n\nlabels_ = pd.DataFrame({\"sentiment_label\":sentiment_label,\"tweet_id\":tweet_id})\nlabels_train, labels_test= train_test_split(labels_,random_state=42)\n\n\nstopwords = nltk.corpus.stopwords.words(['spanish'])\nstemmer = nltk.stem.snowball.SnowballStemmer('spanish')\n\nmy_list=['cual','pm','am','va','p m','a m','q','ver','hoy',\n 'aca','aqui','da','m','p','tal','tan','haga',\n 'v','u','como','ve','retweeted','fm','usted','hace',\n 'responde','espere','tambien','dice','dicen','dijo',\n 'segun','segun','cada','anos','aun','aunque','cree','ay',\n 'creen','creer','creo','decir','demas','estan','retwit',\n 'hace','hacen','hacer','hecha','hicieron' ,'hizo','cosa','d',\n 'porque','demas','diga','digo','estan','etc','ir','llega','pa','ser',\n 'hoy','puede','quiere','ser','sera','si','van','ir',\n 'sr','tan','ud','va','van','vamos','voy','x','vez','sra',\n 'ahi','ahora','vez','via','vea','mas','b','uds','ahi','alla',\n 'dejen','dejar','cosas','asi','solo','rt','ps','petro',\n 'ivanduque','petrogustavo','sergio_fajardo','DeLaCalleHum',\n 'German_Vargas','duque','fajardo','vargas','lleras','colombia',\n 'alvaro','uribe','colombiano','venezuela','candidato','voto','votar']\nstopwords.extend(my_list)\n\ndef preprocessor_tweet(s):\n\n tweet_p.set_options(tweet_p.OPT.EMOJI,\n tweet_p.OPT.URL,\n tweet_p.OPT.RESERVED,\n tweet_p.OPT.SMILEY,\n tweet_p.OPT.MENTION)\n s = re.sub(r'@petrogustavo', 'petrogustavo', s)\n s = re.sub(r'@sergio_fajardo', 'sergio_fajardo', s)\n s = re.sub(r'@IvanDuque','IvanDuque',s)\n s = re.sub(r'@AlvaroUribeVel','AlvaroUribeVel',s)\n s = re.sub(r'@JuanManSantos','JuanManSantos',s)\n s = re.sub(r'@German_Vargas','German_Vargas',s)\n s = re.sub(r'@ClaudiaLopez','ClaudiaLopez',s)\n s = re.sub(r'@DeLaCalleHum','DeLaCalleHum',s)\n s = tweet_p.clean(s)\n s = re.sub(r'\\b(?:a*(?:ja)+h?|(?:l+o+)+l+)\\b', ' ', s)\n s = re.sub(r'[^\\w]', ' ', s)\n s = strip_accents_unicode(s.lower())\n s = tweet_p.clean(s)\n\n return s\n\ndef tokenize_and_stem(text):\n # first tokenize by sentence, then by word to ensure that punctuation is caught as it's own token\n tokens = [word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]\n filtered_tokens = []\n # filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)\n for token in tokens:\n if re.search('[a-zA-Z]', token):\n filtered_tokens.append(token)\n stems = [stemmer.stem(t) for t in filtered_tokens]\n return stems\n\n\ndef tokenize_only(text):\n # first tokenize by sentence, then by word to ensure that punctuation is caught as it's own token\n tokens = [word.lower() for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]\n filtered_tokens = []\n # filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)\n for token in tokens:\n if re.search('[a-zA-Z]', token):\n filtered_tokens.append(token)\n return filtered_tokens\n\ncountvectorizer_ = CountVectorizer(tokenizer = tokenize_only,\n stop_words = stopwords,\n max_df=0.95,\n min_df=0.009,\n ngram_range=(1, 2))\n###semisup\n###\nfrom sklearn.metrics import (precision_score, 
recall_score,f1_score,accuracy_score,roc_auc_score,roc_curve)\n\ndata = pd.read_csv( os.path.join( data_path,\"db_tweets.csv\" ) , sep = \"|\", lineterminator = '\\n')\ndata_RF = data.merge(labels_,how='inner',left_on = \"tweet_id\",right_on = \"tweet_id\")\ndata_test = data.merge(labels_test,how='inner',left_on = \"tweet_id\",right_on = \"tweet_id\")\ndata = data.merge(labels_train,how='left',left_on = \"tweet_id\",right_on = \"tweet_id\")\ndata_labeled = data[data.sentiment_label>=0]\ndata = data[data.sentiment_label.isnull()].sample(10000)\n\ndata = pd.concat([data,data_labeled])\n\nclean_tweets = data.text_tweet.apply (preprocessor_tweet)\nclean_tweets_test = data_test.text_tweet.apply (preprocessor_tweet)\nprint(\"cleaning done!\")\n\ncountvectorizer_matrix = countvectorizer_.fit_transform (clean_tweets)\ncountvectorizer_matrix_test = countvectorizer_.transform (clean_tweets_test)\n\nlabels_g = np.array(data.sentiment_label)\nlabels_g[np.where(np.isnan(labels_g))] = -1\nlabels_g[np.where(np.isin(labels_g,99))] = 2\nprint(\"fitting model knn = 3\")\nlabel_prop_model_500 = LabelSpreading(kernel = 'knn',n_jobs = 3,n_neighbors=7)\nfrom scipy.sparse import csgraph\n\nlabel_prop_model_500.fit(countvectorizer_matrix.toarray(),labels_g)\n\nprint(\"done!\")\n\ny_test = data_test.sentiment_label\ny_pred = label_prop_model_500.predict(countvectorizer_matrix_test.toarray())\n\npres_semisup = precision_score(y_test, y_pred,average = 'weighted')\nrecall_semisup = recall_score(y_test, y_pred,average = 'weighted')\nf1_semisup = f1_score(y_test, y_pred,average = 'weighted')\nacc_semisup = accuracy_score(y_test, y_pred)\n\n\n#### RF\n########\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import RandomizedSearchCV\nimport matplotlib.pyplot as plt\n\n\nX_train,X_test,y_train,y_test = train_test_split(data_RF.text_tweet,data_RF.sentiment_label,random_state=42)\n\ny_test = y_test.reset_index(drop = True)\ny_train = y_train.reset_index(drop = True)\n\ny_test[np.where(np.isin(y_test,99))[0]] = 2\ny_train[np.where(np.isin(y_train,99))[0]] = 2\n\nclean_tweets_train = X_train.apply(preprocessor_tweet)\nclean_tweets_test = X_test.apply(preprocessor_tweet)\n\ncountvectorizer_ = CountVectorizer(tokenizer = tokenize_and_stem,\n stop_words = stopwords,\n max_df=0.95,\n min_df=0.002,\n ngram_range=(1, 3))\n\ncountvectorizer_matrix = countvectorizer_.fit_transform (clean_tweets_train)\ncountvectorizer_matrix_test = countvectorizer_.transform(clean_tweets_test)\n\nmodel_rf = RandomForestClassifier(**{'n_estimators': 400,\n 'min_samples_split': 10,\n 'min_samples_leaf': 4,\n 'max_features': 'auto',\n 'max_depth': 70,\n 'bootstrap': True})\n# Random search of parameters, using 3 fold cross validation,\n# search across 100 different combinations, and use all available cores\n# Fit the random search model\n\nmodel_rf.fit (countvectorizer_matrix, y_train)\ny_pred = model_rf.predict (countvectorizer_matrix_test)\n\npres_sup = precision_score(y_test, y_pred,average = 'weighted')\nrecall_sup = recall_score(y_test, y_pred,average = 'weighted')\nf1_sup = f1_score(y_test, y_pred,average = 'weighted')\nacc_sup = accuracy_score(y_test, y_pred)\n\nresultados = pd.DataFrame({\"f1_score\":[f1_sup,f1_semisup],\n \"precision\":[pres_sup,pres_semisup],\n \"recall\":[recall_sup,recall_semisup],\n 
\"accuracy\":[acc_sup,acc_semisup]},index=[\"Supervised\",\"Semisupervised\"])\n\n\nround(resultados,3).to_csv(os.path.join(data_path,\"results_classification.csv\"))\n","repo_name":"TravisDunlop/colombia-elections-twitter","sub_path":"sentiment-analysis/src/classification_sentiment.py","file_name":"classification_sentiment.py","file_ext":"py","file_size_in_byte":7965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"40916374751","text":"from copy import copy\nfrom typing import Dict, List, Tuple\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom cycler import cycler\n\n\nplt.rcParams['axes.prop_cycle'] = cycler(color=[\n '#4E79A7', '#F28E2B', '#E15759', 'dimgrey','#59A14E',\n '#EDC949','#B07AA2','#FF9DA7','#9C755F','#BAB0AC'])\n\n\ncsv2alg = {\n '/ensemble/': 'DiR',\n '/single/': 'PG',\n '/smerl_ppo/': 'SMERL',\n '/dvd/': 'DvD',\n '/multi/': 'Multi'\n}\n\nenv_and_param = [\n ['Hopper', 'mass'],\n ['Hopper', 'fric'],\n ['Walker', 'mass'],\n ['Walker', 'fric'],\n ['Ant', 'mass'],\n ['Ant', 'fric']\n]\n\ndef collect_csv_path(env: str, param: str) -> List[str]:\n all_paths = []\n for alg in ['ensemble', 'dvd', 'smerl_ppo', 'multi', 'single']:\n for seed in ['10','20', '30','40','50','60','70','80']:\n all_paths.append(\n f'/home/xukang/Project/state_filtration_for_qd/statistic/{alg}/{env}_dynamics_{param}-{seed}.csv'\n )\n return all_paths\n\ndef title_fig(env: str, param: str) -> str:\n if env in ['Hopper', 'Walker']:\n if param == 'fric':\n title = f'{env} - foot friction'\n else:\n title = f'{env} - foot {param}' \n else:\n if param == 'mass':\n title = 'Ant - leg mass'\n else:\n title = 'Ant - ankle friction'\n return title\n\n\ndef plot() -> None:\n sns.set_style('white')\n fig, axes = plt.subplots(nrows=3, ncols=2, tight_layout=True, figsize=(9, 9.5), sharey=False, sharex=True)\n for i, ax in enumerate(axes.flat):\n\n env = env_and_param[i][0]\n param = env_and_param[i][1]\n all_path = collect_csv_path(env, param)\n\n new_df = []\n for path in all_path:\n seed = path.split('.')[0].split('-')[-1]\n with open(path, 'r', encoding='utf-8') as f:\n df = pd.read_csv(path)\n\n for csvkey in list(csv2alg.keys()):\n if csvkey in path:\n alg_key = csvkey\n break\n primitive_scores = df.values[:,1:]\n max_primitive_rewards = np.max(primitive_scores, axis=-1).tolist()\n \n # remove half data\n all_param_scale = df.values[:,0].tolist()\n for j in reversed(range(len(max_primitive_rewards))):\n if j%2 == 0:\n temp = copy(max_primitive_rewards[j])\n scale_temp = copy(all_param_scale[j])\n\n max_primitive_rewards.remove(temp)\n all_param_scale.remove(scale_temp)\n\n\n # process the data\n new_df.append(\n pd.DataFrame({\n 'param scale' : all_param_scale,\n 'return' : max_primitive_rewards,\n 'alg' : [f'{csv2alg[alg_key]}' for _ in range(len(max_primitive_rewards))],\n 'seed' : [f'{seed}' for _ in range(len(max_primitive_rewards))]\n })\n ) \n\n new_df = pd.concat(new_df)\n\n # plot\n sns.lineplot(\n data = new_df,\n x = 'param scale',\n y = 'return',\n hue = 'alg',\n style = 'alg',\n ax = ax,\n dashes = True,\n markers = True,\n err_style = 'band',\n #sizes= (5,5,5,5,5)\n linewidth = 1\n )\n #ax.set_ylim([1000, 4000])\n if i == 1:\n ax.legend().set_title('')\n else:\n ax.legend().remove()\n ax.set_xlabel('Scale of the parameter', fontsize=12)\n if i % 2 == 0:\n ax.set_ylabel('Return', fontsize=12)\n else:\n ax.set_ylabel('')\n ax.set_title(title_fig(env, param), fontsize=12)\n for _, s in 
ax.spines.items():\n s.set_linewidth(1.2)\n\n\n #sns.despine(fig, ax)\n\n plt.show()\n\n\n\nif __name__ == '__main__':\n plot()","repo_name":"Kavka1/state_filtration_for_qd","sub_path":"script_plot/plot_for_all_parameter_disturbance_conditions.py","file_name":"plot_for_all_parameter_disturbance_conditions.py","file_ext":"py","file_size_in_byte":4059,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"32243228334","text":"import streamlit as st\r\nimport pandas as pd\r\nimport pickle\r\n\r\nimport base64 #to code & encode into pkl format\r\nimport time #for file-naming convention\r\ntimestr = time.strftime(\"%Y%m%d-%H%M%S\")\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn.metrics import mean_squared_error,r2_score\r\n\r\n\r\n\r\n#Sets the layout to full width\r\nst.set_page_config(layout= \"wide\")\r\n\r\n# title of the app\r\nst.title(\"\"\"\r\n# Build a model for Predicting Sales based on Marketing Investment\r\n\r\nThe *RandomForestRegressor* is utilised in this implementation to anticipate sales based on marketing investment. \r\n\r\nTune the hyperparameters for more accuracy!\r\n\r\n\"\"\")\r\n\r\n#---------------------------------------------------------#\r\n\r\n#Model Building\r\ndef build_model(df):\r\n X = df.iloc[:,:-1]\r\n y = df.iloc[:,-1]\r\n\r\n #Data Split\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = split_size)\r\n\r\n st.markdown('**1.1 Data Splits**')\r\n st.write(\"Training Set\")\r\n st.info(X_train.shape)\r\n st.write('Test Set')\r\n st.info(X_test.shape)\r\n\r\n st.markdown('**1.2 Variable Details**')\r\n st.write('Independent variable (X)')\r\n st.info(list(X.columns))\r\n st.write('Dependent variable (y)')\r\n st.info(y.name)\r\n\r\n rf = RandomForestRegressor(n_estimators=parameter_n_estimators, random_state=parameter_random_state,\r\n max_features=parameter_max_features, criterion=parameter_criterion,\r\n min_samples_split=parameter_min_samples_split,\r\n min_samples_leaf=parameter_min_samples_leaf,\r\n bootstrap=parameter_bootstrap, oob_score=parameter_oob_score, n_jobs=parameter_n_jobs)\r\n rf_final = rf.fit(X_train,y_train)\r\n\r\n st.subheader('2. Model Performance')\r\n\r\n st.markdown(\"**2.1 Training Set\")\r\n y_pred_train = rf.predict(X_train)\r\n st.write('Coefficient of determination ($R^2$): ')\r\n st.info(r2_score(y_train,y_pred_train))\r\n\r\n st.write('Error (MSE or MAE): ')\r\n st.info(mean_squared_error(y_train,y_pred_train))\r\n\r\n\r\n st.markdown(\"**2.1 Test Set\")\r\n y_pred_test = rf.predict(X_test)\r\n st.write('Coefficient of determination ($R^2$): ')\r\n st.info(r2_score(y_test, y_pred_test))\r\n\r\n st.write('Error (MSE or MAE): ')\r\n st.info(mean_squared_error(y_test, y_pred_test))\r\n\r\n\r\n st.subheader('3. Model Parameters')\r\n st.write(rf.get_params())\r\n\r\n return rf_final\r\n\r\n\r\n\r\n#---------------------------------------------------------#\r\n\r\n#adding a sidebar\r\nst.sidebar.header(\"1. Upload File\")\r\n\r\n#setup file upload\r\nuploaded_file = st.sidebar.file_uploader(label=\"Upload your CSV file\",\r\n type=['csv'])\r\n\r\n#Sidebar - Specify Parameter settings\r\nwith st.sidebar.header(\"2. 
Set Parameters\"):\r\n split_size = st.sidebar.slider('Data split ratio (% for Training Set)',0.1, 0.9, 0.25, 0.05)\r\n\r\nwith st.sidebar.subheader(\"2.1 Learning Parameters\"):\r\n parameter_n_estimators = st.sidebar.slider('Number of estimators (n_estimators)', 0, 1000, 100, 100)\r\n parameter_max_features = st.sidebar.selectbox('Max features (max_features)', ('auto', 'sqrt', 'log2'))\r\n parameter_min_samples_split = st.sidebar.slider(\r\n 'Minimum number of samples required to split an internal node (min_samples_split)', 1, 10, 2, 1)\r\n parameter_min_samples_leaf = st.sidebar.slider(\r\n 'Minimum number of samples required to be at a leaf node (min_samples_leaf)', 1, 10, 2, 1)\r\n\r\nwith st.sidebar.subheader('2.2. General Parameters'):\r\n parameter_random_state = st.sidebar.slider('Seed number (random_state)', 0, 1000, 42, 1)\r\n parameter_criterion = st.sidebar.selectbox('Performance measure (criterion)', ('mse', 'mae'))\r\n parameter_bootstrap = st.sidebar.selectbox('Bootstrap samples when building trees (bootstrap)', (True, False))\r\n parameter_oob_score = st.sidebar.selectbox(\r\n 'Whether to use out-of-bag samples to estimate the R^2 on unseen data (oob_score)', (False, True))\r\n parameter_n_jobs = st.sidebar.selectbox('Number of jobs to run in parallel (n_jobs)', (1, -1))\r\n\r\n#---------------------------------------------------------#\r\n\r\n#Download model as pickle file\r\n\r\ndef download_model(model):\r\n output_model = pickle.dumps(model)\r\n b64 = base64.b64encode(output_model).decode()\r\n href = f'Download Trained Model .pkl File'\r\n st.markdown(href, unsafe_allow_html=True)\r\n\r\n#---------------------------------------------------------#\r\n\r\n#Main Panel\r\n\r\nst.subheader(\"1. Dataset\")\r\n\r\nglobal df\r\nif uploaded_file is not None:\r\n df = pd.read_csv(uploaded_file)\r\n st.markdown('**Glimpse of Dataset**')\r\n st.write(df)\r\n rfmodel = build_model(df)\r\n download_model(rfmodel)\r\nelse:\r\n st.info(\"Awaiting for CSV file to be uploaded.\")\r\n\r\n#-------------------------------------------------------#\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"shanbhag003/Streamlit-App","sub_path":"trainandtest.py","file_name":"trainandtest.py","file_ext":"py","file_size_in_byte":4998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"13441193669","text":"import pytest\nfrom unittest.mock import Mock\n\nfrom fastapi import UploadFile\nfrom src.database.psql.models import FileData\nfrom src.database import SessionLocal, Base, engine\nfrom src.services.file_services import upload_file, delete_user_file\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef db_session():\n Base.metadata.create_all(engine)\n session = SessionLocal()\n yield session\n session.close()\n Base.metadata.drop_all(engine)\n\n\ndef create_test_file(db_session, user_id, product_id=None):\n test_file = FileData(user_id=user_id, filename='test_file.jpg', file_path='test_file.jpg', product_id=product_id)\n db_session.add(test_file)\n db_session.commit()\n\n\n@pytest.mark.parametrize(\n \"user_id, filename, product_id, expected\",\n [\n (1, 'test.txt', 2, {'user_id': 1, 'filename': 'test.txt', 'product_id': 2}),\n (2, 'file2.txt', None, {'user_id': 2, 'filename': 'file2.txt', 'product_id': None}),\n (3, 'file3.txt', 3, {'user_id': 3, 'filename': 'file3.txt', 'product_id': 3})\n ]\n)\ndef test_upload_file(mocker, db_session, user_id, filename, product_id, expected):\n mock_upload_file = mocker.MagicMock(spec=UploadFile)\n 
mock_upload_file.file = filename\n mock_upload_file.filename = Mock(return_value=b'test.txt')\n upload_file(db_session, mock_upload_file, user_id, filename, product_id)\n\n db_file = db_session.query(FileData).filter_by(user_id=user_id, filename=filename, product_id=product_id).first()\n assert db_file is not None\n assert db_file.user_id == expected['user_id']\n assert db_file.filename == expected['filename']\n assert db_file.product_id == expected['product_id']\n\n\n@pytest.mark.parametrize(\"user_id, file_id\", [(1, 1), (2, 2), (3, 3)])\ndef test_delete_user_file(db_session, user_id, file_id):\n # Создание тестовых данных\n create_test_file(db_session, user_id)\n\n # Проверка, что данные действительно добавлены\n assert db_session.query(FileData).filter_by(id=file_id, user_id=user_id).first() is not None\n\n # Тестирование функции удаления\n deleted_file = delete_user_file(db_session, user_id, file_id)\n assert deleted_file is not None\n\n # Проверка, что данные удалены\n assert db_session.query(FileData).filter_by(id=file_id, user_id=user_id).first() is None\n\n\n@pytest.mark.parametrize(\"user_id, file_id\", [(1, 1), (2, 2), (3, 3)])\ndef test_delete_user_file(db_session, user_id, file_id):\n # Создание тестовых данных\n create_test_file(db_session, user_id)\n\n # Проверка, что данные действительно добавлены\n assert db_session.query(FileData).filter_by(id=file_id, user_id=user_id).first() is not None\n\n # Тестирование функции удаления\n deleted_file = delete_user_file(db_session, user_id, file_id)\n assert deleted_file is not None\n\n # Проверка, что данные удалены\n assert db_session.query(FileData).filter_by(id=file_id, user_id=user_id).first() is None\n","repo_name":"greyadams/file_manager","sub_path":"tests/unit/test_file_services.py","file_name":"test_file_services.py","file_ext":"py","file_size_in_byte":3126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"27786538556","text":"from collections import deque\nn,m = map(int,input().split())\nworld = []\ncheck = []\nfor i in range(n):\n a = list(map(int,input().split()))\n world.append(a)\n check.append(a)\nans = [0]\nnx = [-1,1,0,0]\nny = [0,0,1,-1]\ndef bfs(x,y):\n cnt = 1\n q = deque()\n q.append((x,y))\n check[x][y] = 0\n while q:\n a,b = q.popleft()\n for i in range(4):\n dx = a + nx[i]\n dy = b + ny[i]\n if 0<=dx winnerVotes:\n winnerVotes = voteCount\n winner = person\n\n##### SUMMARY OUTPUT TO SCREEN\n\nprint(\"Election Results\")\nprint(dashbreak)\nprint(f\"Total Votes: {totalVotes})\")\nprint(dashbreak)\nfor person, voteCount in candidateVotes.items():\n print(f\"{person}: {candidatePercentages[person]} ({voteCount})\")\nprint(dashbreak)\nprint(f\"Winner: {winner}\")\nprint(dashbreak)\n\n##### WRITING THE RESULTS TO A TEXT FILE (electionAnalysis.txt)\n\noutputFile = \"/Users/rsc/Desktop/BCS Homework Assignments/HW #3/PythonChallenge/PyPoll/Analysis/electionAnalysis.txt\"\nwith open(outputFile, 'w+') as file:\n file.write(dashbreak + \"\\n\")\n file.write(f\"Total Votes: {totalVotes}\" + \"\\n\")\n file.write(dashbreak + \"\\n\")\n for person, voteCount in candidateVotes.items():\n file.write(f\"{person}: {candidatePercentages[person]} ({voteCount})\" + \"\\n\")\n file.write(dashbreak + \"\\n\")\n file.write(f\"Winner: {winner}\" + \"\\n\")\n file.write(dashbreak + \"\\n\")\n 
file.close()\n\n\n\n\n","repo_name":"richardsclary/PythonChallenge","sub_path":"PyPoll/electionAnalysis.py","file_name":"electionAnalysis.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"73338929357","text":"import matplotlib.pyplot as plt; plt.ion()\nimport matplotlib.gridspec as gridspec\nimport numpy as np\nimport networkx as nx\n\n\n\ndef create_network(specs = {'n01': 2, 'n0': 3, 'n1': 5}):\n net = nx.empty_graph()\n net.add_edge(0, 1)\n\n n = 2\n for i in range(n, n + specs['n01']):\n net.add_edge(0, i)\n net.add_edge(1, i)\n\n n = len(net)\n for i in range(n, n + specs['n0']):\n net.add_edge(0, i)\n\n n = len(net)\n for i in range(n, n + specs['n1']):\n net.add_edge(1, i)\n\n return net\n\n\ndef edge_colormaps(net):\n e_col = []\n for e in net.edges():\n d = net.degree(e[1])\n if d > 2:\n e_col.append('coral')\n elif d == 2:\n e_col.append('lightskyblue')\n else:\n e_col.append('lightgrey')\n return e_col\n\n\ndef net_colormap(specs):\n n_size = [20] * 2\n n_col = ['orangered'] * 2\n\n n_size += [15] * specs['n01']\n n_col += ['royalblue'] * specs['n01']\n\n n_size += [10] * (specs['n0'] + specs['n1'])\n n_col += ['slategrey'] * (specs['n0'] + specs['n1'])\n return n_size, n_col\n\n\ndef plot_net(specs = {'n01': 5, 'n0': 36, 'n1': 50}, adj=.2, ax=None):\n net = create_network(specs)\n if ax is None:\n fig, ax = plt.subplots()\n node_size, node_color = net_colormap(specs)\n edge_color = edge_colormaps(net)\n pos = nx.spring_layout(net)\n pos = adjust_pos(pos, specs, adj)\n nx.draw_networkx(net, pos=pos, ax=ax, node_size=node_size, node_color=node_color, edge_color=edge_color, width=1, with_labels=False)\n ax.axis('off')\n\n\ndef adjust_pos(pos, specs, adj=.1):\n \"\"\"\n Readjusts the position of nodes that are too close to the main tie\n \"\"\"\n ctr = (pos[0] + pos[1]) / 2\n v0 = pos[0] - pos[1]\n v = np.array([v0[0], -v0[1]])\n for node in range(2, 2 + specs['n01']):\n if np.dot(pos[node], v) >= np.dot(ctr, v):\n pos[node] += adj * v\n else:\n pos[node] += -adj * v\n return pos\n\n\n\ndef plot_ts(times, obs_w, ax=None, obs_w_l=.15, levs=.1, specs={}, var='', varname=''):\n ltimes = len(times)\n times = [obs_w[0]] + times + [obs_w[1]]\n times = [(t - obs_w[0])/(obs_w[1] - obs_w[0] + 0.) 
for t in times]\n if ax is None:\n fig, ax = plt.subplots(figsize=(8.8, 4))\n obs_w_l = obs_w_l\n levs = levs\n levels = [obs_w_l] + [levs] * ltimes + [obs_w_l]\n\n stemc = ax.stem(times, levels, linefmt='k-', basefmt='k-')\n stemc[0].set_visible(False) #Remove 'dots' on top of stem plot\n\n ts_levs = (levs - obs_w_l)\n for i in range(1, len(stemc[1]) - 1):\n stemc[1][i].set_lw(.5)\n stemc[1][0].set_ydata([ts_levs, obs_w_l])\n stemc[1][0].set_ls('--') #Change color and style of first and last lines\n stemc[1][0].set_color('r')\n\n stemc[1][-1].set_ydata([ts_levs, obs_w_l])\n stemc[1][-1].set_ls('--')\n stemc[1][-1].set_color('r')\n\n stemc[2].set_ydata([levs/2] * 2) # move base to the middle\n\n ax.set_ylim((ts_levs, obs_w_l))\n ax.axis('off')\n\n if var:\n w = int(specs.get('w', 0))\n x = specs.get(var, 0)\n if var == 't_stb':\n x = np.round(x / 120., 2)\n s = varname + '={}\\t'.format(x) + r'$w$' + '={}'.format(w)\n ax.annotate(s, xy=(.05, .15), xycoords='data')\n\n\nedge_values = {'b':{0:0, 2:0}, 'bt_n':{0:0, 2:0}, 't_stb':{0:0, 2:0}}\n\ndef plot_main_figure(edge_set, times_set, edge_values={}, path=''):\n obs_w = [1167609600, 1177977600]\n varnames = {'b': r'$B$', 'bt_n': r'$N^E$', 't_stb': r'$TS$'}\n fig = plt.figure()\n widths = [1, 1, 1]\n heights = [2.5, 1, 2.5, 1]\n\n spec = gridspec.GridSpec(ncols=3, nrows=4, width_ratios=widths, height_ratios=heights, hspace=.1, wspace=.2)\n edges = get_reduced_edge_set(edge_set, edge_values)\n col_var = ['b', 'bt_n', 't_stb']\n for col in range(3):\n var = col_var[col]\n for row in [0, 2]: #row corresponds to weak=0/strong=2 case\n ax = fig.add_subplot(spec[row, col])\n edge = edges[var][row]\n\n edge_specs = edge_set[var][row][edge]\n plot_net(edge_specs, ax=ax)\n\n ax2 = fig.add_subplot(spec[row + 1, col])\n times = times_set[edge]\n plot_ts(times, obs_w, var=var, varname=varnames[var], specs=edge_specs, ax=ax2)\n fig.savefig(path)\n\ndef get_reduced_edge_set(edge_set, edge_values={}):\n \"\"\"\n Retuns the a dictionary where we select the nth (edge, times) pair for each [var][weak/strong] case. 
edges are sorted according to the first node id.\n \"\"\"\n if not edge_values:\n edge_values = {k: {0: 0, 2: 0} for k in edge_set}\n es = {k: {} for k in edge_set}\n for k in edge_values:\n es[k][0] = sorted(edge_set[k][0])[edge_values[k][0]]\n es[k][2] = sorted(edge_set[k][2])[edge_values[k][2]]\n return es\n\n\ndef get_edge_set(df):\n \"\"\"\n Given the dataframe for the whole data, gets possible edges for the main plot according to weak/strong values\n \"\"\"\n\n vrs = ['b', 't_stb', 'bt_n', 'out_call_div']\n data_vrs = ['0', '1', 'w', 'ovrl', 'n_ij', 'deg_0', 'deg_1']\n edges = {v: {} for v in vrs}\n df_conds = (df.w > 40) & (df.w < 65) & ((df.deg_0 < 80) | (df.deg_1 < 80)) & (np.abs(df.deg_1 - df.deg_0) < 25)\n df = df[data_vrs + vrs][df_conds].sort_values('ovrl')\n\n for var in vrs:\n asc = True if var in ['t_stb', 'bt_n'] else False\n df2 = df[(df.ovrl < .03)].sort_values(var, ascending=asc)\n idx = [int(x * df2.shape[0]) for x in np.linspace(.05, .15, 20)]\n edges[var][0] = parse_edges(df2.iloc[idx])\n\n df2 = df[(df.ovrl > .12)].sort_values(var, ascending=(not asc))\n idx = [int(x * df2.shape[0]) for x in np.linspace(.05, .15, 20)]\n edges[var][2] = parse_edges(df2.iloc[idx])\n\n return edges\n\n\ndef parse_edges(edge):\n edges = {}\n for rowi in edge.iterrows():\n row = rowi[1]\n n01 = row['n_ij']\n n0 = row['deg_0'] - n01 - 1\n n1 = row['deg_1'] - n01 - 1\n hpr = {'n01': int(n01), 'n0': int(n0), 'n1': int(n1)}\n hpr.update(row.to_dict())\n edges[(int(row[0]), int(row[1]))] = hpr\n return edges\n\n\ndef get_times(edges, times_path='../full_run/times_dict.txt'):\n \"\"\"\n Given the edge set from get_edge_set, gets the timestamps for those edges\n \"\"\"\n import utils\n times_dict = {}\n edge_set = set([tuple(x) for sublist in edges.values() for y in sublist.values() for x in y])\n with open(times_path, 'r') as r:\n row = r.readline()\n while (row is not None) & (len(edge_set) > 0):\n e0, e1, times = utils.parse_time_line(row)\n edge = (e0, e1)\n if edge in edge_set:\n times_dict[edge] = times\n edge_set.remove(edge)\n row = r.readline()\n return times_dict\n\n\nif __name__ == '__main__':\n import pandas as pd\n import pickle\n import os\n #path = '/scratch/work/urenaj1/full/'\n path = '../full_run/'\n\n df_path = path + 'full_df_paper.txt'\n times_path = path + 'times_dic.txt'\n\n edges_outpath = path + 'mainplot_edges.p'\n times_outpath = path + 'mainplot_times.p'\n plot_path = '../paper/figures/figure_1.pdf'\n\n create_times = False\n\n if create_times:\n df = pd.read_csv(df_path, sep=' ')\n\n edges = get_edge_set(df)\n del df\n times = get_times(edges, times_path)\n\n pickle.dump(edges, open(edges_outpath, 'wb'))\n pickle.dump(times, open(times_outpath, 'wb'))\n else:\n edges = pickle.load(open(edges_outpath, 'rb'))\n times = pickle.load(open(times_outpath, 'rb'))\n\n edge_values = {'b':{0:0, 2:0}, 'bt_n':{0:0, 2:0}, 't_stb':{0:0, 2:0}}\n edge_values['b'][0] = 7 #5\n edge_values['b'][2] = 3\n\n edge_values['bt_n'][0] = 15 #10, 4, 17\n edge_values['bt_n'][2] = 9 #19 #15 #2 13\n\n edge_values['t_stb'][0] = 16 ###3 #1, 9\n edge_values['t_stb'][2] = 5 #1\n\n plot_main_figure(edges, times, edge_values, plot_path)\n","repo_name":"javurena7/tie_strengths","sub_path":"paper_plots.py","file_name":"paper_plots.py","file_ext":"py","file_size_in_byte":7901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"42110605725","text":"from db.run_sql import run_sql\nfrom models.booking import Booking\nimport 
repositories.member_repository as member_repository\nimport repositories.workout_repository as workout_repository\n\n\ndef select(id):\n booking = None\n sql = \"SELECT * FROM bookings where id = ?\"\n values = [id]\n result = run_sql(sql, values)[0]\n\n if result is not None:\n booking = Booking(result['member_id'], result['workout_id'], result['id'])\n return booking\n\n\ndef select_all():\n bookings = []\n sql = \"SELECT * FROM bookings\"\n results = run_sql(sql)\n\n for row in results:\n member = member_repository.select(row['member_id'])\n workout = workout_repository.select(row['workout_id'])\n booking = Booking(member, workout, row['id'])\n bookings.append(booking)\n return bookings\n\n\ndef get_workout(id):\n booking = select(id)\n workout = workout_repository.select(booking.workout)\n return workout\n\n\ndef get_member(id):\n booking = select(id)\n member = member_repository.select(booking.member)\n return member\n\n\ndef save(booking):\n\n sql = \"INSERT INTO bookings (member_id, workout_id) VALUES (?, ?) RETURNING id\"\n values = [booking.member.id, booking.workout.id]\n results = run_sql(sql, values)\n booking.id = results[0]['id']\n\n workout_repository.update_capacity_filled(booking.workout)\n return booking\n\n\ndef save_with_check(booking):\n workout = workout_repository.select(booking.workout.id)\n\n if workout.capacity_filled >= workout.capacity:\n return None\n else:\n sql = \"INSERT INTO bookings (member_id, workout_id) VALUES (?, ?) RETURNING id\"\n values = [booking.member.id, booking.workout.id]\n results = run_sql(sql, values)\n booking.id = results[0]['id']\n\n workout_repository.update_capacity_filled(booking.workout)\n\n return booking\n\n\ndef delete(id):\n workout = get_workout(id)\n workout_repository.reduce_capacity_filled(workout)\n\n sql = \"DELETE FROM bookings WHERE id = ?\"\n values = [id]\n run_sql(sql, values)\n\n\n\ndef delete_all():\n sql = \"DELETE FROM bookings\"\n run_sql(sql)\n\n\n","repo_name":"shunaMcCallum/gym_app_project","sub_path":"repositories/booking_repository.py","file_name":"booking_repository.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"21774345438","text":"import pyttsx\nimport RPi.GPIO as GPIO\nimport time\nGPIO.setmode(GPIO.BCM)\nTRIG = 23\nECHO = 24\nflag = \"abc\"\nprev = \"xyz\"\n\nprint(\"dIST MEASUREMENT\")\n\nGPIO.setup(TRIG,GPIO.OUT)\nGPIO.setup(ECHO,GPIO.IN)\n\nGPIO.output(TRIG, False)\nprint(\"Waiting for sensor\")\ntime.sleep(2)\n\ndef saying(tosay):\n engine = pyttsx.init()\n engine.say(tosay)\n engine.runAndWait()\n\nwhile(True):\n GPIO.output(TRIG, True)\n time.sleep(0.2)\n GPIO.output(TRIG, False)\n\n while(GPIO.input(ECHO)==0):\n pulse_start = time.time()\n\n while(GPIO.input(ECHO)==1):\n pulse_end = time.time()\n\n pulse_duration = pulse_end - pulse_start\n distance = pulse_duration * 17150\n distance = round(distance, 2)\n prev = flag\n if(distance < 50):\n tosay = \"Obstacle at \" + str(distance) + \"centi meter\"\n flag = \"yes\"\n else:\n tosay = \"obstacle not detected\"\n flag = \"no\"\n\n if(flag != prev):\n if(flag == \"yes\"):\n saying(tosay)\n\nGPIO.cleanup()\n","repo_name":"DroidRonin/Helios-","sub_path":"ultrasonic.py","file_name":"ultrasonic.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"72097027918","text":"#!/usr/bin/env python3\n#\n# MIT - (c) 2016 ThomasTJ (TTJ)\n#\n\n\nimport 
xml.etree.ElementTree as ET\nimport importlib\nimport os\nfrom core.colors import bc as bc\n\n\nmodulesXML = 'core/modules.xml'\n\n\ndef loadXML():\n \"\"\"Load the XML tree.\"\"\"\n tree = ET.parse(modulesXML)\n return tree.getroot()\n\n\ndef addModule(module_path):\n \"\"\"Add a module to the WMDframe.\"\"\"\n print('\\n')\n # Copy to tmp folder first\n os.system('cp ' + module_path + ' ' + 'tmp/tmpImportModule.py')\n print(bc.OKGREEN + ' -> ' + bc.ENDC + 'Module copied to tmp folder')\n root = loadXML()\n try:\n importModule = importlib.import_module('tmp.tmpImportModule')\n print(bc.OKGREEN + ' -> ' + bc.ENDC + 'Module imported into memory')\n except:\n print(bc.FAIL + ' -> Error, can\\'t find module: ' + module_path + bc.ENDC)\n return None\n\n # Check if module name already exists\n try:\n for child in root.findall('module'):\n if importModule.Options.Name == (child.get('name')):\n print(bc.FAIL + ' -> Error, module already exist with name: ' + importModule.Options.Name + bc.ENDC)\n return None\n print(bc.OKGREEN + ' -> ' + bc.ENDC + 'Modulename does not exist already')\n except:\n print(bc.FAIL + ' -> Error, something is wrong when checking the name against module.xml. Is the name defined in the module?' + bc.ENDC)\n return None\n\n # New category - then create it\n try:\n if os.path.isdir(\"modules/\" + importModule.Options.Category + \"/\") is False:\n print(bc.OKGREEN + ' -> Creating new category: ' + importModule.Options.Category + bc.ENDC)\n os.system(\"mkdir modules/\" + importModule.Options.Category)\n os.system(\"touch modules/\" + importModule.Options.Category + \"/__init__.py\")\n except:\n print(bc.FAIL + ' -> Error, couldn\\'t create new folder. Got enough user privileges?' + bc.ENDC)\n return None\n\n # Copying to tmp folder\n try:\n os.system('cp tmp/tmpImportModule.py' + ' ' + 'modules/' + importModule.Options.Category + '/' + importModule.Options.Modulename + '.py')\n print(bc.OKGREEN + ' -> ' + bc.ENDC + 'Copying module to folder')\n except:\n print(bc.FAIL + ' -> Error, couldn\\'t copy to folder. Got enough user privileges?' 
+ bc.ENDC)\n return None\n\n # Add data to modules.xml\n print(bc.OKGREEN + ' ->' + bc.ENDC + ' Adding module to core.')\n try:\n if os.path.isfile('tmp/modules.xml.tmp'):\n os.system('rm tmp/modules.xml.tmp')\n os.system('cp ' + modulesXML + ' tmp/modules.xml.tmp')\n with open(modulesXML, 'r') as rawXML:\n data = rawXML.readlines()\n data = data[:-1]\n with open(modulesXML, 'w') as rawXML:\n rawXML.writelines(data)\n module = (\n '\\t' + '\\n' +\n '\\t\\t' + importModule.Options.Call + '' + '\\n' +\n '\\t\\t' + importModule.Options.Modulename + '' + '\\n' +\n '\\t\\t' + importModule.Options.Version + '' + '\\n' +\n '\\t\\t' + importModule.Options.Type + '' + '\\n' +\n '\\t\\t' + importModule.Options.Category + '' + '\\n' +\n '\\t\\t' + importModule.Options.Description + '' + '\\n' +\n '\\t' + '\\n' +\n ''\n )\n with open(modulesXML, 'a') as rawXML:\n rawXML.writelines(module)\n print(bc.OKGREEN + ' -> ' + bc.ENDC + 'Module data: \\n\\n' + module[:-7])\n print(bc.OKGREEN + ' -> ' + bc.ENDC + 'Module succesfully added: ' + importModule.Options.Name)\n except:\n print(bc.FAIL + ' -> Error, something went wrong while adding moduledata to modules.xml' + bc.ENDC)\n if os.path.isfile('tmp/modules.xml.tmp'):\n os.system('mv tmp/modules.xml.tmp ' + modulesXML)\n print(bc.FAIL + ' -> Backup of module.xml restored' + bc.ENDC)\n return None\n\n # Deleting module in tmp folder\n os.system('rm tmp/tmpImportModule.py')\n print(bc.OKGREEN + ' -> ' + bc.ENDC + 'Module in tmp folder deleted')\n print(bc.OKGREEN + ' !! ' + bc.ENDC + 'Thank you for adding a new module')\n\n\ndef removeModule(module_path):\n \"\"\"Remove a module from the WMDframe - ALPHA.\"\"\"\n print('')\n os.system('cp ' + module_path + ' ' + 'tmp/tmpRemoveModule.py')\n print(bc.ENDC + ' -> ' + bc.ENDC + 'Module copied to tmp folder for backup.')\n removeModule = importlib.import_module('tmp.tmpRemoveModule')\n\n name = removeModule.Options.Name\n mName = removeModule.Options.Modulename\n category = removeModule.Options.Category\n exists = 0\n print(' -> ' + 'Checking if module exists in XML file.' + bc.ENDC)\n try:\n with open(modulesXML) as infile, open(modulesXML + 'tmp', 'w') as outfile:\n for line in infile:\n if line.strip() == '':\n exists = 1\n break\n outfile.write(line)\n for line in infile:\n if line.strip() == '':\n break\n for line in infile:\n outfile.write(line)\n if exists == 1:\n os.system('mv core/modules.xmltmp core/modules.xml')\n print(bc.OKGREEN + ' -> ' + 'Module deleted in XML file. Name: ' + name + bc.ENDC)\n else:\n print(bc.WARN + ' -> ' + 'Module does not exists XML file. Name: ' + name + bc.ENDC)\n os.system('rm core/modules.xmltmp')\n except:\n print(bc.FAIL + ' -> ' + 'ERROR encountered while checking XML file.' + bc.ENDC)\n os.system('rm core/modules.xmltmp')\n\n print(' -> ' + 'Going for deletion of module in modulefolder.' + bc.ENDC)\n if os.path.isfile('modules/' + category + '/' + mName + '.py'):\n os.system('rm modules/' + category + '/' + mName + '.py')\n print(bc.OKGREEN + ' -> ' + 'Module deleted in folder. Category: ' + category + ' Modulename: ' + mName + bc.ENDC)\n else:\n print(bc.WARN + ' -> ' + 'Module does not exists in modulefolder. 
Path: ' + 'modules/' + category + '/' + mName + '.py' + bc.ENDC)\n\n\ndef showModules():\n \"\"\"Show all modules.\"\"\"\n root = loadXML()\n\n print('\\n')\n print('%-*s %s%s' % (5, '', bc.FAIL, '## MODULES ##'))\n print('')\n # print('%-*s%s %-*s | %-*s | %-*s | %-*s | %s %s' % (5, '', bc.FAIL, 15, 'CAT:', 12, 'TYPE:', 15, 'CALL:', 30, 'NAME:', 'DESCRIPTION:', bc.ENDC))\n print('%-*s%s %-*s %-*s %-*s %-*s %s %s' % (5, '', bc.FAIL, 15, 'CAT:', 12, 'TYPE:', 15, 'CALL:', 30, 'NAME:', 'DESCRIPTION:', bc.ENDC))\n default_data = {}\n for child in root.findall('module'):\n name = child.get('name')\n category = child.find('category').text\n type = child.find('type').text\n call = child.find('call').text\n description = child.find('description').text\n default_data[name + '\\\\' + category + '\\\\' + type + '\\\\' + call + '\\\\' + description] = (category + type + name + str('1'))\n\n type = ''\n for k, v in sorted(default_data.items(), key=lambda x: x[1]):\n a, b, c, d, e = k.split('\\\\')\n if b != type:\n print('%-*s %-*s' % (5, '', 40, '---------------------------------------------------------------------------------------------------------------------------------'))\n type = b\n # print('%-*s %-*s | %-*s | %s%-*s | %s%-*s | %s %s' % (5, '', 15, b, 12, c, bc.BOLD, 15, d, bc.ENDC, 30, a, e, bc.ENDC))\n print('%-*s %-*s %-*s %s%-*s %s%-*s %s %s' % (5, '', 15, b, 12, c, bc.BOLD, 15, d, bc.ENDC, 30, a, e, bc.ENDC))\n print('\\n')\n\n\ndef showModuleData(author, name, call, category, type, version, description, license, datecreation, lastmodified):\n \"\"\"Show a specific modules information.\"\"\"\n print(\n '' +\n '\\n\\t' + bc.OKBLUE + 'METADATA:' + bc.ENDC +\n '\\n\\t' + '---------' +\n '\\n' + '\\tArthur:\\t\\t' + author +\n '\\n' + '\\tName:\\t\\t' + name +\n '\\n' + '\\tCall:\\t\\t' + call +\n '\\n' + '\\tCat:\\t\\t' + category +\n '\\n' + '\\tType:\\t\\t' + type +\n '\\n' + '\\tVersion:\\t' + version +\n '\\n' + '\\tDescription:\\t' + description +\n '\\n' + '\\tLicense:\\t' + license +\n '\\n' + '\\tDatecreation:\\t' + datecreation +\n '\\n' + '\\tLastmodified:\\t' + lastmodified +\n '\\n'\n )\n\n\ndef existModule(call):\n \"\"\"Check if the modules exists in the modules.xml.\"\"\"\n root = loadXML()\n check = 'false'\n for child in root.findall('module'):\n if call == (child.find('call').text):\n check = 'true'\n if check == 'false':\n return 'false'\n else:\n return 'true'\n\n\ndef loadModuleCategory(call):\n \"\"\"Get module category.\"\"\"\n root = loadXML()\n for child in root.findall('module'):\n if call == (child.find('call').text):\n return child.find('category').text\n\n\ndef loadModuleMName(call):\n \"\"\"Get module filename.\"\"\"\n root = loadXML()\n for child in root.findall('module'):\n if call == (child.find('call').text):\n return child.find('modulename').text\n\n\ndef loadModuleName(call):\n \"\"\"Get module name.\"\"\"\n root = loadXML()\n for child in root.findall('module'):\n if call == (child.find('call').text):\n return child.get('name')\n\n\ndef loadModulePath(call):\n \"\"\"Get module path.\"\"\"\n call = cleanModuleCall(call)\n category = loadModuleCategory(call)\n modulename = loadModuleMName(call)\n return category + '.' + modulename\n\n\ndef loadModule(call):\n \"\"\"Load module for running.\"\"\"\n call = cleanModuleCall(call)\n if existModule(call) == 'true':\n modulepath = loadModulePath(call)\n # Check if file exists first\n return importlib.import_module('modules.' 
+ modulepath)\n else:\n return (bc.WARN + ' ERROR, no module call found with: ' + call + bc.ENDC)\n\n\ndef cleanModuleCall(call):\n \"\"\"Simple cleaner to strip unwanted chars.\"\"\"\n call = call.strip('[]\\'')\n return call\n","repo_name":"ThomasTJdev/WMD","sub_path":"core/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":10117,"program_lang":"python","lang":"en","doc_type":"code","stars":262,"dataset":"github-code","pt":"29"} +{"seq_id":"23155396899","text":"#!/usr/bin/env python3\nimport argparse\nimport os\nimport subprocess\nimport sys\nfrom typing import Iterator, NamedTuple\n\n\nclass Context(NamedTuple):\n\timage_base: str\n\tsystem: str\n\tjava: str\n\tmcdr: str\n\ttag: str\n\n\ndef iterate_all() -> Iterator[Context]:\n\tfor image_base, systems in {\n\t\t'eclipse-temurin': ['jammy'],\n\t\t'openjdk': ['bullseye', 'slim-bullseye'],\n\t}.items():\n\t\tfor system in systems:\n\t\t\tfor java in [8, 11, 17, 21]:\n\t\t\t\tfor mcdr in ['latest', '2.12', '2.11', '2.10']:\n\t\t\t\t\ttag = f'fallenbreath/pterodactyl-yolks:minecraft-runtime-{system}-{java}-{mcdr}'\n\t\t\t\t\tyield Context(image_base, system, str(java), mcdr, tag)\n\n\ndef cmd_build(args: argparse.Namespace):\n\tfor ctx in iterate_all():\n\t\tif ctx.mcdr == 'latest':\n\t\t\tmcdr_req = 'mcdreforged'\n\t\telse:\n\t\t\tmcdr_req = f'mcdreforged~={ctx.mcdr}'\n\n\t\tprint(f'======== System: {ctx.system}, Java: {ctx.java}, MCDR: {ctx.mcdr}, Tag: {ctx.tag!r} ========')\n\n\t\tcmd = [\n\t\t\t'docker', 'build', os.getcwd(),\n\t\t\t'-t', ctx.tag,\n\t\t\t'--build-arg', f'IMAGE_BASE={ctx.image_base}',\n\t\t\t'--build-arg', f'SYSTEM={ctx.system}',\n\t\t\t'--build-arg', f'JAVA_VERSION={ctx.java}',\n\t\t\t'--build-arg', f'MCDR_REQUIREMENT={mcdr_req}',\n\t\t]\n\t\tif args.http_proxy is not None:\n\t\t\tcmd.extend([\n\t\t\t\t'--build-arg', f'http_proxy={args.http_proxy}',\n\t\t\t\t'--build-arg', f'https_proxy={args.http_proxy}',\n\t\t\t])\n\t\tsubprocess.check_call(cmd)\n\n\t\tif args.push:\n\t\t\tsubprocess.check_call(['docker', 'push', ctx.tag])\n\n\ndef cmd_push(args: argparse.Namespace):\n\tfor ctx in iterate_all():\n\t\tsubprocess.check_call(['docker', 'push', ctx.tag])\n\n\ndef cmd_delete(args: argparse.Namespace):\n\tfor ctx in iterate_all():\n\t\tsubprocess.check_call(['docker', 'image', 'rm', ctx.tag])\n\n\ndef main():\n\tparser = argparse.ArgumentParser()\n\tsubparsers = parser.add_subparsers(title='Command', help='Available commands', dest='command', required=True)\n\n\tparser_build = subparsers.add_parser('build', help='Build all images')\n\tparser_build.add_argument('-p', '--push', action='store_true', help='Push after build')\n\tparser_build.add_argument('--http-proxy', help='Set the url of http proxy to be used in build')\n\n\tsubparsers.add_parser('push', help='Push all images')\n\tsubparsers.add_parser('delete', help='Delete all images')\n\n\targs = parser.parse_args()\n\n\tif args.command == 'build':\n\t\tcmd_build(args)\n\telif args.command == 'push':\n\t\tcmd_push(args)\n\telif args.command == 'delete':\n\t\tcmd_delete(args)\n\telse:\n\t\tprint('Unknown command {!r}'.format(args.command))\n\t\tsys.exit(1)\n\n\nif __name__ == '__main__':\n\ttry:\n\t\tmain()\n\texcept subprocess.CalledProcessError as e:\n\t\tprint(type(e).__name__, e.returncode, file=sys.stderr)\n\texcept 
KeyboardInterrupt:\n\t\tpass\n","repo_name":"Fallen-Breath/pterodactyl-eggs","sub_path":"yolks/minecraft/runtime/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"20793011923","text":"import csv\nfrom django.core.management.base import BaseCommand\nfrom map.models import Squirrel\nimport datetime\n\nclass Command(BaseCommand):\n help = 'Import Squirrel database'\n\n def add_arguments(self, parser):\n parser.add_argument('path', type=str, help='Indicates the path where the csv file is located')\n\n\n def handle(self, *args, **options):\n path = options['path']\n print('importing data')\n with open (path) as f:\n reader = csv.reader (f, delimiter=',', quotechar=\"\\\"\")\n fields_name = next(reader)\n for i, _ in enumerate(fields_name):\n fields_name[i] = fields_name[i].lower ()\n fields_name[i] = fields_name[i].replace (' ', '_')\n for row in reader:\n squirrel = Squirrel()\n for i, field in enumerate(row):\n if fields_name[i] in [f.name for f in Squirrel._meta.get_fields()]+['x','y']:\n if fields_name[i] == 'x':\n squirrel.latitude = float(field)\n elif fields_name[i] == 'y':\n squirrel.longitude = float(field)\n elif field == 'false':\n setattr(squirrel, fields_name[i], False)\n elif field == 'true':\n setattr(squirrel, fields_name[i], True)\n elif fields_name[i] == 'date':\n setattr(squirrel, fields_name[i], datetime.datetime.strptime(field, '%m%d%Y'))\n elif squirrel._meta.get_field(fields_name[i]).choices != []:\n setattr(squirrel, fields_name[i], field.lower())\n else:\n setattr(squirrel, fields_name[i], field)\n squirrel.save()\n print(\"data succesfully imported\")\n","repo_name":"eg3048/Squirrel-Tracker","sub_path":"website/map/management/commands/import_squirrel_data.py","file_name":"import_squirrel_data.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"74761995598","text":"from collections import deque\n\n\ndef cups_and_bottles():\n cups = deque(map(int, input().split()))\n bottles = deque(map(int, input().split()))\n wasted_water = 0\n output = \"\"\n\n while cups and bottles:\n cup = cups.popleft()\n bottle = bottles.pop()\n if cup > bottle:\n cup -= bottle\n cups.appendleft(cup)\n else:\n wasted_water += bottle - cup\n if bottles:\n output = f\"Bottles: {' '.join(map(str, bottles))}\"\n elif cups:\n output = f\"Cups: {' '.join(map(str, cups))}\"\n\n output += f\"\\nWasted litters of water: {wasted_water}\"\n print(output)\n\n\ncups_and_bottles()","repo_name":"IvanParvanovski/SoftUniPractice","sub_path":"Python/Lists_as_Stacks_and_Queues(EXERCISE)/10_Cups_and_Bottles.py","file_name":"10_Cups_and_Bottles.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"29"} +{"seq_id":"39941661328","text":"from random import sample\r\nfrom search import linear_search, binary_search\r\nfrom time import time_ns\r\n\r\n\r\ndef run(n):\r\n data = [i for i in range(1, n+1)]\r\n start_time = time_ns()\r\n binary_search(data, data[-1], 0, n-1)\r\n end_time = time_ns()\r\n time_taken = end_time - start_time\r\n return time_taken\r\n\r\n\r\nif __name__ == \"__main__\":\r\n for i in range(10000000, 100000001, 10000000):\r\n print(f\"Number of sample data: {i}, Time Taken: {run(i)} nanosecond\")\r\n\r\n\r\n# def run(n):\r\n# data = sample(range(1, n+1), n)\r\n# start_time = 
time_ns()\r\n# l = linear_search(data, data[-1])\r\n\r\n# end_time = time_ns()\r\n\r\n# time_taken = end_time - start_time\r\n# return time_taken\r\n\r\n\r\n# if __name__ == \"__main__\":\r\n# for i in range(1000000, 10000001, 1000000):\r\n# print(f\"Number of sample data: {i}, Time Taken: {run(i)} nanosecond\")\r\n","repo_name":"Nirbhayadh/dsa-lab1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"129012958","text":"\nimport PyFBA\n\n\nsone = set()\nwith open('C.sedlakii_gf_MOPS.reactions', 'r') as fin:\n for l in fin:\n sone.add(l.strip())\n\nstwo = set()\nwith open('our_reactions.txt', 'r') as fin:\n for l in fin:\n stwo.add(l.strip())\n\nmissed = set()\nfor s in sone:\n if s not in stwo:\n missed.add(s)\n\n\nroles = PyFBA.filters.reactions_to_roles(missed)\nrolesneeded = set()\nfor m in missed:\n if m not in roles:\n roles[m] = set()\n rolesneeded.update(roles[m])\n print(\"{}\\t{}\".format(m, roles[m]))\n\n\nroles_to_add = set()\nwith open('/data/FuzzyMetabolicNetworks/models/Citrobacter/263199/citrobacter.roles', 'r') as fin:\n for l in fin:\n p=l.strip().split(\"\\t\")\n roles_to_add.add(p[0])\n\nprint(\"\\n\\n\")\n\nfor r in missed:\n willadd = False\n for role in roles[r]:\n if role in roles_to_add:\n willadd = True\n if not willadd:\n print(\"Will not add: {}\\t{}\".format(r, roles[r]))\n","repo_name":"linsalrob/Genotype-Phenotype-Modeling","sub_path":"iPythonNotebooks/miss.py","file_name":"miss.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"72391730638","text":"from sx1262 import SX1262\nimport time\nfrom L76 import l76x\nimport math\nimport hashlib\nfrom L76.micropyGPS.micropyGPS import MicropyGPS\n\n\nUARTx = 0\nBAUDRATE = 9600\ngnss_l76b=l76x.L76X(uartx=UARTx,_baudrate = BAUDRATE)\n\ngnss_l76b.l76x_exit_backup_mode()\n\n# enable/disable sync PPS when NMEA output\n'''\noptional:\nSET_SYNC_PPS_NMEA_ON\nSET_SYNC_PPS_NMEA_OFF\n'''\ngnss_l76b.l76x_send_command(gnss_l76b.SET_SYNC_PPS_NMEA_ON)\n\n\n# make an object of NMEA0183 sentence parser\n\"\"\"\nSetup GPS Object Status Flags, Internal Data Registers, etc\nlocal_offset (int): Timzone Difference to UTC\nlocation_formatting (str): Style For Presenting Longitude/Latitude:\n Decimal Degree Minute (ddm) - 40° 26.767′ N\n Degrees Minutes Seconds (dms) - 40° 26′ 46″ N\n Decimal Degrees (dd) - 40.446° N\n\"\"\"\nparser = MicropyGPS(location_formatting='dd')\n\nsentence = ''\n\n\nsx = SX1262(spi_bus=1, clk=10, mosi=11, miso=12, cs=3, irq=20, rst=15, gpio=2)\n\n# LoRa\nsx.begin(freq=868, bw=125.0, sf=12, cr=8, syncWord=0x12,\n power=-5, currentLimit=60.0, preambleLength=8,\n implicit=False, implicitLen=0xFF,\n crcOn=True, txIq=False, rxIq=False,\n tcxoVoltage=1.7, useRegulatorLDO=False, blocking=True)\n\n# FSK\n##sx.beginFSK(freq=923, br=48.0, freqDev=50.0, rxBw=156.2, power=-5, currentLimit=60.0,\n## preambleLength=16, dataShaping=0.5, syncWord=[0x2D, 0x01], syncBitsLength=16,\n## addrFilter=SX126X_GFSK_ADDRESS_FILT_OFF, addr=0x00, crcLength=2, crcInitial=0x1D0F, crcPolynomial=0x1021,\n## crcInverted=True, whiteningOn=True, whiteningInitial=0x0100,\n## fixedPacketLength=False, packetLength=0xFF, preambleDetectorLength=SX126X_GFSK_PREAMBLE_DETECT_16,\n## tcxoVoltage=1.6, useRegulatorLDO=False,\n## blocking=True)\n\n\nwhile True:\n \n if gnss_l76b.uart_any():\n sentence = 
parser.update(chr(gnss_l76b.uart_receive_byte()[0]))\n if sentence:\n \n #print('WGS84 Coordinate:Latitude(%c),Longitude(%c) %.9f,%.9f'%(parser.latitude[1],parser.longitude[1],parser.latitude[0],parser.longitude[0]))\n data = f'{(parser.latitude[1])},{(parser.longitude[1])},{(parser.latitude[0])},{(parser.longitude[0])},{(parser.altitude)},{(parser.hdop)},{(parser.satellites_in_use)}'\n\n print(data)\n\n #gnss_l76b.wgs84_to_bd09(parser.longitude[0],parser.latitude[0])\n #print('Baidu Coordinate: longitude(%c),latitudes(%c) %.9f,%.9f'%(parser.longitude[1],parser.latitude[1],gnss_l76b.Lon_Baidu,gnss_l76b.Lat_Baidu))\n #print('copy Baidu Coordinate and paste it on the baidu map web https://api.map.baidu.com/lbsapi/getpoint/index.html')\n \n #print('UTC Timestamp:%d:%d:%d'%(parser.timestamp[0],parser.timestamp[1],parser.timestamp[2]))\n \n# print fix status\n '''\n 1 : NO FIX\n 2 : FIX 2D\n 3 : FIX_3D\n '''\n print('Fix Status:', parser.fix_stat)\n \n print('Altitude:%d m'%(parser.altitude))\n print('Height Above Geoid:', parser.geoid_height)\n print('Horizontal Dilution of Precision:', parser.hdop)\n print('Satellites in Use by Receiver:', parser.satellites_in_use)\n time.sleep(2)\n\n data_to_send = data.encode('ascii')\n sx.send(data_to_send)\n time.sleep(2)\n \n \n else:\n sx.send(\"CANNOT GET GNSS SIGNAL\")\n print(\"CANNOT GET GNSS SIGNAL\")\n","repo_name":"Blueline-lab/LORA_GNSS_TRACKER","sub_path":"TX/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3560,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"22721467701","text":"from transformers import CamembertTokenizer\nimport torch \nfrom classifier import path_modele\n\n# The below function tokenizes the data and it has two arguments: data and max_length\ndef tokenization(data, max_length):\n\n '''\n args:\n - data: a pandas dataframe\n - max_length: int\n \n return:\n - input_ids: torch\n - attention_mask: torch\n\n '''\n print(\"Starting tokenization...\")\n\n tokenizer = CamembertTokenizer.from_pretrained(path_modele)\n\n input_ids = []\n attention_mask = []\n # Encoding every sentence\n for element in data:\n encoded_element = tokenizer.encode_plus(str(element), add_special_tokens=True, \n truncation=True, max_length=max_length,\n padding='max_length', return_tensors='pt')\n input_ids.append(encoded_element[\"input_ids\"])\n attention_mask.append(encoded_element[\"attention_mask\"])\n print(\"Tokenization finished !\")\n\n input_ids = torch.cat(input_ids, dim=0)\n attention_mask = torch.cat(attention_mask, dim=0)\n # return the input_ids and attention_mask\n return input_ids, attention_mask","repo_name":"borkounou/defi2","sub_path":"analysis_with_CamemBERT/Main_Model/tokenizer_local.py","file_name":"tokenizer_local.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"12160799306","text":"# This is a sudoku checker for day 1 of the devember challenge\n# Time started: 5pm\n# Time finished: 5:45pm\n\n# Create some sudoku data\nsudokuPuzzle = [\n\t[1, 2, 3, 4, 5, 6, 7, 8, 9],\n\t[4, 5, 6, 7, 8, 9, 1, 2, 3],\n\t[7, 8, 9, 1, 2, 3, 4, 5, 6],\n\t[2, 3, 4, 5, 6, 7, 8, 9, 1],\n\t[5, 6, 7, 8, 9, 1, 2, 3, 4],\n\t[8, 9, 1, 2, 3, 4, 5, 6, 7],\n\t[3, 4, 5, 6, 7, 8, 9, 1, 2],\n\t[6, 7, 8, 9, 1, 2, 3, 4, 5],\n\t[9, 1, 2, 3, 4, 5, 6, 7, 8]\n]\n\n# Create a function to draw a sudoku\ndef drawSudoku(sData):\n\t\n\t# Empty line\n\tprint(\"\")\n\t\n\t# For each row\n\tfor r 
in range(9):\n\t\t\n\t\t# Store the row as an empty string\n\t\trData = \"\"\n\t\t\n\t\t# Get the row\n\t\trow = sData[r]\n\t\t\n\t\t# For each number add to the row\n\t\tfor i in range(9):\n\t\t\t\n\t\t\t# Add the number\n\t\t\trData += \" {} | \".format(row[i])\n\t\t\t\n\t\t\t# If the current number is the last in box add another line\n\t\t\tif (i + 1) % 3 == 0 and i < 8:\n\t\t\t\t\n\t\t\t\t# Add the extra line\n\t\t\t\trData += \"|\"\n\t\t\t\t\n\t\t# Print the row\n\t\tprint(rData)\n\t\t\n\t\t\n\t\t# Check for the last row\n\t\tif r < 8:\n\t\t\t\n\t\t\t# Print the divider\n\t\t\tprint(\"-------------------------------------------------\")\n\t\t\t\n\t\t\t# Check for the dividers\n\t\t\tif (r + 1) % 3 == 0:\n\t\t\t\n\t\t\t\t# Print the divider\n\t\t\t\tprint(\"-------------------------------------------------\")\n\n# Function to check whether a sudoku is valid\ndef valid(sData):\n\t\n\t# Box data\n\tbox = []\n\t\n\t# For each row, column and box\n\tfor i in range(9):\n\t\t\n\t\t# Create some data to represent whether each row and column have each number\n\t\trow = []\n\t\tcolumn = []\n\t\t\n\t\t# Second iterator allows us to check each element individually\n\t\tfor j in range(9):\n\t\t\t\n\t\t\t# Check whether the given elements are already in their respective lists\n\t\t\t\n\t\t\t# Row\n\t\t\tif sData[i][j] in row:\n\t\t\t\t\n\t\t\t\t# return Invalid\n\t\t\t\treturn False\n\t\t\t\n\t\t\t# Add to the row list\n\t\t\trow.append(sData[i][j])\n\t\t\t\n\t\t\t# Column\n\t\t\tif sData[j][i] in column:\n\t\t\t\t\n\t\t\t\t# return Invalid\n\t\t\t\treturn False\n\t\t\t\n\t\t\t# Add to the row list\n\t\t\tcolumn.append(sData[j][i])\n\t\t\t\n\t\t\t# Calculate the correct box\n\t\t\tb = 0\n\t\t\t\n\t\t\t# Check which row we are looking at\n\t\t\tif i > 2:\n\t\t\t\tb += 3\n\t\t\tif i > 5:\n\t\t\t\tb += 3\n\t\t\tif j > 2:\n\t\t\t\tb += 1\n\t\t\tif j > 5:\n\t\t\t\tb += 1\n\t\t\t\t\n\t\t\t# Check that the box has been added\n\t\t\tif len(box) != b + 1:\n\t\t\t\tbox.append([])\n\t\t\t\t\n\t\t\t# Check if the number is already in the box\n\t\t\tif sData[i][j] in box[b]:\n\t\t\t\t\n\t\t\t\t# Return invalid\n\t\t\t\treturn False\n\t\t\t\t\n\t\t\t# Add it to the box\n\t\t\tbox[b].append(sData[i][j])\n\t\t\t\n\t# Return valid\n\treturn True\n\t\t\t\t\n\t\t\t\n# Check if the sudoku is valid\nif valid(sudokuPuzzle):\n\t# Print it\n\tdrawSudoku(sudokuPuzzle)\nelse:\n\tprint(\"Sorry that sudoku is invalid!\")\n","repo_name":"thebillington/devember17","sub_path":"day1/sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"27451827162","text":"# Программа на Python. 
Пусть имеется следующий словарь:\n#\n# d = {'one': 1, 'two': 2, 'natural': 1, 'True': 1, 'even': 2, 'three': 3, 'False': 0}\n# Сформируйте из него другой словарь d_unique, состоящий из данных с уникальными значениями (оставлять нужно последнее\n# значение, остальные отбрасывать).\n\nd = {'one': 1, 'two': 2, 'natural': 1, 'True': 1, 'even': 2, 'three': 3, 'False': 0}\n\nd_unique = {val: key for key, val in {v: k for k, v in d.items()}.items()}\nprint(d_unique)","repo_name":"LikeKugi/stepik_python","sub_path":"stepik/structures/create_unique_dict.py","file_name":"create_unique_dict.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"5376683869","text":"import math\nimport numpy as np\nimport scipy.linalg as sla\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import quad\nimport sys\n\nclass FluidFlow:\n def __init__(self, V_0, nu, a, b, c, d, L):\n self.V_0 = V_0\n self.nu = nu\n self.a = a #large side length\n self.b = b #length before boundary starts\n self.c = c #width of obstruction\n self.d = d #height of obstruction\n self.L = L # L is number of grid points along a\n self.h = a/L\n self.e_points = math.ceil(L*b/a)+1 #number points on E boundary\n self.c_points = math.ceil(L*c/a)+1 #'' ''\n self.a_points = int(L*(a-b-c)/a)+1\n self.f_points=L+1\n self.d_points=int(L*d/a)\n self.h_points=math.ceil(L*(a-d)/a)+1 #num of points above obstruction\n self.N = self.get_superindex(L, L) + 1\n self.psi_values = np.zeros((self.N)) #stream function matrix\n self.omega_values = np.zeros((self.N)) #vorticity matrix\n self.sortboundaries()\n\n # n is the number of relaxations performed, w is the overrelaxation factor\n def SOR(self, n, w = 1.5):\n self.initialize_free_flow()\n self.apply_boundary_cond([\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\"])\n for i in range(n):\n self.update_psi_interior(w)\n self.update_omega_interior(w)\n # self.residual_norm()\n self.apply_boundary_cond([\"B\", \"C\", \"D\", \"H\"])\n\n def initialize_free_flow(self):\n for I in range(self.N):\n i, j = self.get_coords(I)\n self.psi_values[I] = self.V_0 * self.a*j/self.L\n self.omega_values[I] = 0\n\n def apply_boundary_cond(self, boundaries):\n for boundary in boundaries:\n if boundary == \"A\":\n for I in self.Abound_points:\n self.psi_values[I] = 0\n self.omega_values[I] = 0\n if boundary == \"E\":\n for I in self.Ebound_points:\n self.psi_values[I] = 0\n self.omega_values[I] = 0\n if boundary == \"C\":\n for I in self.Cbound_points:\n i, j = self.get_coords(I)\n psi = self.psi_values[self.get_superindex(i, j + 1)]\n self.omega_values[I] = -2/(self.h**2)*psi\n self.psi_values[I] = 0\n if boundary == \"B\":\n for I in self.Bbound_points:\n i, j = self.get_coords(I)\n psi = self.psi_values[self.get_superindex(i + 1, j)]\n self.psi_values[I] = 0\n self.omega_values[I] = -2/(self.h**2)*psi\n if boundary == \"D\":\n for I in self.Dbound_points:\n i, j = self.get_coords(I)\n psi = self.psi_values[self.get_superindex(i - 1, j)]\n self.psi_values[I] = 0\n self.omega_values[I] = -2/(self.h**2)*psi\n if boundary == \"G\": #set to free flow conditions\n for I in self.Gbound_points:\n i, j = self.get_coords(I)\n self.psi_values[I] = self.V_0 * self.a*j/self.L\n self.omega_values[I] = 0\n if boundary == \"F\": #set to free flow conditions\n for I in self.Fbound_points:\n i, j = self.get_coords(I)\n self.psi_values[I] = self.V_0 * self.a*j/self.L\n self.omega_values[I] = 0\n if boundary == \"H\":\n for I in 
self.Hbound_points:\n i, j = self.get_coords(I)\n I_left = self.get_superindex(i-1, j)\n self.psi_values[I] = self.psi_values[I_left]\n self.omega_values[I] = self.omega_values[I_left]\n\n def update_psi_interior(self, w):\n for I in self.Interior_points:\n i, j = self.get_coords(I)\n self.psi_values[I] = (1-w)*self.psi_values[I] + \\\n (w/4)*(self.psi_values[self.get_superindex(i+1,j)] + \\\n self.psi_values[self.get_superindex(i-1,j)] + \\\n self.psi_values[self.get_superindex(i,j+1)] + \\\n self.psi_values[self.get_superindex(i,j-1)] - \\\n self.omega_values[I])\n\n def update_omega_interior(self, w):\n old_omega_values = self.omega_values\n for I in self.Interior_points:\n i, j = self.get_coords(I)\n d_psi_d_y = self.psi_values[self.get_superindex(i,j+1)] - \\\n self.psi_values[self.get_superindex(i,j-1)]/(2*self.h)\n d_omega_d_y = old_omega_values[self.get_superindex(i,j+1)] - \\\n old_omega_values[self.get_superindex(i,j-1)]/(2*self.h)\n d_psi_d_x = self.psi_values[self.get_superindex(i+1,j)] - \\\n self.psi_values[self.get_superindex(i-1,j)]/(2*self.h)\n d_omega_d_x = old_omega_values[self.get_superindex(i+1,j)] - \\\n old_omega_values[self.get_superindex(i-1,j)]/(2*self.h)\n\n self.omega_values[I] = (1-w)*self.omega_values[I] + \\\n (w/4)*(self.omega_values[self.get_superindex(i+1,j)] + \\\n self.omega_values[self.get_superindex(i-1,j)] + \\\n self.omega_values[self.get_superindex(i,j+1)] + \\\n self.omega_values[self.get_superindex(i,j-1)] - \\\n (1/(self.nu))*(d_psi_d_y*d_omega_d_x - d_psi_d_x*d_omega_d_y))\n\n def residual(self,i, j):\n laplacian_psi_omega=self.psi_values[self.get_superindex(i+1,j)] + \\\n self.psi_values[self.get_superindex(i-1,j)] + \\\n self.psi_values[self.get_superindex(i,j+1)] + \\\n self.psi_values[self.get_superindex(i,j-1)] - \\\n 4*self.psi_values[self.get_superindex(i,j)] + \\\n self.omega_values[self.get_superindex(i,j)]\n\n return laplacian_psi_omega\n\n def residual_norm(self):\n r_psi=self.residual()\n i_integran=quad(r_psi**2,)\n return\n\n def onboundary(self,I,specific=False): #checks if point on onboundary\n onbound=False\n L=self.L\n i,j=self.get_coords(I)\n if specific==False:\n if i==0 or i==L or j==0 or j==L: #F boundary\n onbound=True\n elif i==self.e_points-1 and j<=self.d_points: #D bound\n onbound=True\n elif i==self.e_points+self.c_points-2 and j<=self.d_points: #B bound\n onbound=True\n elif i<=self.e_points+self.c_points-2 and i >= self.e_points \\\n and j==self.d_points: #C bounds\n onbound=True\n return onbound\n else:\n if i==0: #F boundary\n whichbound='F'\n elif i==L: #H boundary\n whichbound='H'\n elif j==0 and i<=self.e_points-1: #E boundary\n whichbound='E'\n elif j==0 and i>=self.e_points+self.c_points-2: #A boundary\n whichbound='A'\n elif j==L: #G boundary\n whichbound='G'\n elif i==self.e_points-1 and j<=self.d_points : #D boundary\n whichbound='D'\n elif i==self.e_points+self.c_points-2 and j<=self.d_points:\n whichbound='B'\n elif i<=self.e_points+self.c_points-2 and i >= self.e_points \\\n and j==self.d_points: #C bounds\n whichbound='C'\n return whichbound\n\n def sortboundaries(self): #Imax is total number of verticies\n #sorts the verticies into different lists\n self.Interior_points=[]\n self.Abound_points=[]\n self.Bbound_points=[]\n self.Cbound_points=[]\n self.Dbound_points=[]\n self.Ebound_points=[]\n self.Fbound_points=[]\n self.Gbound_points=[]\n self.Hbound_points=[]\n for I in range(self.N):\n if self.onboundary(I)==False:\n self.Interior_points.append(I)\n else:\n if self.onboundary(I,True)=='A':\n 
self.Abound_points.append(I)\n elif self.onboundary(I,True)=='B':\n self.Bbound_points.append(I)\n elif self.onboundary(I,True)=='C':\n self.Cbound_points.append(I)\n elif self.onboundary(I,True)=='D':\n self.Dbound_points.append(I)\n elif self.onboundary(I,True)=='E':\n self.Ebound_points.append(I)\n elif self.onboundary(I,True)=='F':\n self.Fbound_points.append(I)\n elif self.onboundary(I,True)=='G':\n self.Gbound_points.append(I)\n elif self.onboundary(I,True)=='H':\n self.Hbound_points.append(I)\n\n def get_superindex(self, i, j):\n if i < self.e_points: #shifted to 0, incudes D boundary\n return i*self.f_points + j\n elif i < self.e_points + self.c_points - 2: #above obstruction\n if j >= self.d_points:\n Nbefore=self.f_points*self.e_points\n new_i = i - self.e_points\n new_j = j - self.d_points\n return Nbefore + new_i*(self.h_points) + new_j\n else:\n raise Exception(\"Inaccessible index\")\n else:\n Nbefore = self.f_points*self.e_points + self.h_points*(self.c_points-2)\n new_i = i - self.e_points - (self.c_points - 2)\n return Nbefore + new_i*(self.f_points) + j\n\n def get_coords(self, I): #gets coordinates from superindex I\n if I < self.f_points*self.e_points:\n i = int(I/self.f_points)\n j = I%(self.f_points)\n return i,j\n elif I < self.f_points*self.e_points + self.h_points*(self.c_points-2):\n new_I = I - self.f_points*self.e_points\n i = int(new_I/self.h_points) + self.e_points\n j = new_I%(self.h_points) + self.d_points\n return i,j\n else:\n new_I = I - self.f_points*self.e_points - self.h_points*(self.c_points-2)\n i = int(new_I/self.f_points) + self.e_points + (self.c_points - 2)\n j = new_I%(self.f_points)\n return i,j\n\n def print_psi(self):\n matrix = np.zeros((self.L+1, self.L+1))\n for I in range(self.N):\n i, j = self.get_coords(I)\n matrix[self.L - j][i] = self.psi_values[I]\n np.set_printoptions(precision=3)\n print(matrix)\n\n def print_omega(self):\n matrix = np.zeros((self.L+1, self.L+1))\n for I in range(self.N):\n i, j = self.get_coords(I)\n matrix[self.L - j][i] = self.omega_values[I]\n np.set_printoptions(precision=3)\n print(matrix)\n\n\ndef main():\n V_0 = 1\n nu = 0.1\n region_dim = 1.0\n front_of_plate = 0.25\n back_of_plate = 0.75\n top_of_plate = 0\n L = int(sys.argv[1])\n ff = FluidFlow(V_0, nu, region_dim, front_of_plate,\n back_of_plate - front_of_plate, top_of_plate, L)\n num_relaxations = int(sys.argv[2])\n ff.SOR(num_relaxations)\n ff.print_psi()\n ff.print_omega()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"willdbk/CompPhysProj2","sub_path":"project2.py","file_name":"project2.py","file_ext":"py","file_size_in_byte":11115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"2595678397","text":"import pybreaker\nimport pytest\nimport redis\nimport shortener_api.constants as constants\nfrom shortener_api.service_logic.repository import RedisRepository\nfrom tests.shared_code import get_null_log\n\nREDIS_CREATE_PATH = \"redis.StrictRedis.__init__\"\nREDIS_GET_PATH = \"redis.StrictRedis.get\"\nREDIS_SET_PATH = \"redis.StrictRedis.set\"\nREDIS_HOST = \"host\"\nREDIS_PORT = \"port\"\nCONFIG = {\n constants.ENV_REDIS_HOST: REDIS_HOST,\n constants.ENV_REDIS_PORT: REDIS_PORT,\n}\n\n\ndef test_should_create_redis_client_with_correct_parameters(mocker):\n strict_redis_mock = mocker.patch(REDIS_CREATE_PATH, return_value=None)\n mocker.patch(REDIS_GET_PATH)\n repository = RedisRepository(get_null_log(), CONFIG)\n\n repository.read_value(\"key\")\n\n 
strict_redis_mock.assert_called_once_with(host=REDIS_HOST, port=REDIS_PORT, db=0)\n\n\ndef test_should_store_key_to_redis(mocker):\n mocker.patch(REDIS_CREATE_PATH, return_value=None)\n redis_mock = mocker.patch(REDIS_SET_PATH)\n repository = RedisRepository(get_null_log(), CONFIG)\n key = \"some key\"\n value = \"some value\"\n\n repository.save_value(key, value)\n\n redis_mock.assert_called_once_with(key, value)\n\n\ndef test_should_read_key_from_redis(mocker):\n mocker.patch(REDIS_CREATE_PATH, return_value=None)\n value = \"some value\"\n redis_mock = mocker.patch(REDIS_GET_PATH, return_value=value.encode())\n repository = RedisRepository(get_null_log(), CONFIG)\n key = \"some key\"\n\n result = repository.read_value(key)\n\n assert value == result\n redis_mock.assert_called_once_with(key)\n\n\ndef test_should_read_key_from_redis_and_return_none_if_not_found(mocker):\n mocker.patch(REDIS_CREATE_PATH, return_value=None)\n redis_mock = mocker.patch(REDIS_GET_PATH, return_value=None)\n repository = RedisRepository(get_null_log(), CONFIG)\n key = \"some key\"\n\n result = repository.read_value(key)\n\n assert result is None\n redis_mock.assert_called_once_with(key)\n\n\ndef test_should_throw_breaker_error_and_not_call_redis_after_three_fails_on_write(mocker):\n mocker.patch(REDIS_CREATE_PATH, return_value=None)\n redis_mock = mocker.patch(REDIS_SET_PATH)\n redis_mock.side_effect = [redis.TimeoutError(), redis.TimeoutError(), redis.TimeoutError()]\n repository = RedisRepository(get_null_log(), CONFIG)\n\n for _ in range(2):\n with pytest.raises(redis.TimeoutError):\n repository.save_value(\"key\", \"value\")\n for _ in range(2):\n with pytest.raises(pybreaker.CircuitBreakerError):\n repository.save_value(\"key\", \"value\")\n\n assert 3 == redis_mock.call_count\n\n\ndef test_should_throw_breaker_error_and_not_call_redis_after_three_fails_on_read(mocker):\n mocker.patch(REDIS_CREATE_PATH, return_value=None)\n redis_mock = mocker.patch(REDIS_GET_PATH)\n redis_mock.side_effect = [redis.TimeoutError(), redis.TimeoutError(), redis.TimeoutError()]\n repository = RedisRepository(get_null_log(), CONFIG)\n\n for _ in range(2):\n with pytest.raises(redis.TimeoutError):\n repository.read_value(\"key\")\n for _ in range(2):\n with pytest.raises(pybreaker.CircuitBreakerError):\n repository.read_value(\"key\")\n\n assert 3 == redis_mock.call_count\n","repo_name":"ReitenSchnell/url-shortener","sub_path":"tests/service_logic/repository_test.py","file_name":"repository_test.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"72209139917","text":"import os\nimport time\nfrom tqdm import tqdm\nimport json\nimport argparse\n\n\nparser = argparse.ArgumentParser()\n# Data input settings\nparser.add_argument('--loop_limit', type=int, default=1)\nparser.add_argument('--stop_count', type=int, default=1000)\nparser.add_argument('--save_checkpoint_every', type=int, default=1)\nparser.add_argument('--start_eval_epoch', type=int, default=130)\nparser.add_argument('--scope', type=str, default='')\n#parser.add_argument('--modality', type=str, default='ic')\n#parser.add_argument('--through_encoder', nargs='+', type=int, default=[1, 1, 1])\n#parser.add_argument('--mean_feats', nargs='+', type=int, default=[0, 0, 0])\nparser.add_argument('--gpu', type=str, default='2')\nparser.add_argument('--option', type=int ,default=0)\nparser.add_argument('--d', type=str, default='MSRVTT')\nparser.add_argument('--m', type=str, 
default='ica')\nparser.add_argument('--S2ADRM_type', type=str, default='zi')\nparser.add_argument('--load_pretrained', type=str, default='\\'\\'')\nparser.add_argument('--c3d_feats_name', type=str, default='c3d_60_fc6.hdf5')\nparser.add_argument('--first_evaluate_whole_folder', action='store_true')\nparser.add_argument('--usePAA', action='store_true')\nparser.add_argument('--acoustic', nargs='+', type=int, default=[256, 260])\n\nparser.add_argument('-a', '--activation', default='tanh', type=str)\nparser.add_argument('-at', '--activation_type', default='acc', type=str)\nparser.add_argument('--type_PAA', default=0, type=int)\nparser.add_argument('--with_modality_att', default=False, action='store_true')\nparser.add_argument('--merge_type', default='o', type=str)\nparser.add_argument('--random_type', default='segment_random', type=str)\nparser.add_argument('--equally_sampling', default=False, action='store_true')\nparser.add_argument('--att_mid_size', default=256, type=int)\nparser.add_argument('--use_bn', default=False, action='store_true')\nparser.add_argument('--fusion_type', default='addition', type=str)\n\nparser.add_argument('--encoder_type', default='gru', type=str)\nparser.add_argument('--decoder_type', default='lstm', type=str)\nparser.add_argument('--use_preEncoder', default=False, action='store_true')\nparser.add_argument('--preEncoder_type', default='linear', type=str)\nparser.add_argument('--preEncoder_modality', default='\\'\\'', type=str)\nparser.add_argument('--concat_before_att', default=False, action='store_true')\nparser.add_argument('--dim_encoder_hiddenC', default=512, type=int)\n\nparser.add_argument('--scheduled_sampling', default=False, action='store_true')\nparser.add_argument('--ss_type', default=1, type=int)\nparser.add_argument('--ss_k', default=100.0, type=float)\nparser.add_argument('--ss_linear', nargs='+', default=[100, 0.7], type=float)\nparser.add_argument('--ss_piecewise', nargs='+', default=[150, 0.95, 0.65], type=float)\n\nparser.add_argument('--seed', default=1002, type=int)\nparser.add_argument('--mo', default='\\'\\'', type=str)\nparser.add_argument('--mi', default='\\'\\'', type=str)\nparser.add_argument('-alm', '--all_level_modality', nargs='+', type=int, default=[0, 0, 0, 0])\nparser.add_argument('--n_frames', default=8, type=int)\nparser.add_argument('--dim_guidance', default=128, type=int)\nparser.add_argument('--guidance_type', default='full', type=str)\nparser.add_argument('--no_DMHE_bn', default=False, action='store_true')\nparser.add_argument('--att_dropout', default=0.0, type=float)\nparser.add_argument('--together', default=False, action='store_true')\n\nparser.add_argument('--forget_bias', default=0.6, type=float)\nparser.add_argument('--keyword', default='\\'\\'', type=str)\nparser.add_argument('--no_save_best', default=False, action='store_true')\nparser.add_argument('--cheat', default=False, action='store_true')\nparser.add_argument('--bidirection', default=False, action='store_true')\nparser.add_argument('--dim_encoder_hidden', default=512, type=int)\nparser.add_argument('--grad_clip', default=5, type=float)\nparser.add_argument('--twostream', default=False, action='store_true')\nparser.add_argument('--use_MA', default=False,action='store_true')\nparser.add_argument('--connect_type', default='Direct', type=str)\nparser.add_argument('--global_type', default='Flow', type=str)\nparser.add_argument('--ss_wise', default=False, action='store_true')\nparser.add_argument('--baseline_addition', default=False, 
action='store_true')\nparser.add_argument('--dim_global', default=128, type=int)\nparser.add_argument('--category_type', default=1, type=int)\nargs = parser.parse_args()\n#loop_limit = args.loop_limit\nif '.hdf5' in args.c3d_feats_name:\n if args.d == 'MSRVTT':\n args.c3d_feats_name = 'msrvtt_' + args.c3d_feats_name\n else:\n args.c3d_feats_name = 'msvd_' + args.c3d_feats_name\n#dataset = 'Youtube2Text'\ncheckpoint_path_name = '1006save'\nmodel = 'DFM_Model'\nk_best_model = 3\nsave_model_limit = 50\nteacher_prob = 1\nlearning_rate_decay_rate = 0.994\nlearning_rate_decay_every = 1\nbeam_size = 1\ntrain_type = '1'\nbatch_size = {'MSRVTT': 128, 'Youtube2Text': 64}\nword_count_threshold = {'MSRVTT': 2, 'Youtube2Text': 0}\nmax_len = {'MSRVTT': 30, 'Youtube2Text': 20}\ntrain_max_len = {'MSRVTT': 30, 'Youtube2Text': 20}\ndropout_p = {'MSRVTT': 0.5, 'Youtube2Text': 0.5}\nepochs = {'MSRVTT': 500, 'Youtube2Text': 500}\nlearning_rate = {'MSRVTT': 1e-3, 'Youtube2Text': 1e-3}\ndiscriminative_feats_name = {'MSRVTT': '\\'\\'', 'Youtube2Text': '\\'\\''}\ndim_d = {'MSRVTT': 1024, 'Youtube2Text': 1024}\nacoustic_feats_name = {'MSRVTT': ['msrvtt_vggish.hdf5'], 'Youtube2Text': ['msvd_vggish.hdf5']}#msvd_mfcc_samples.hdf5\nfeats_name = {'MSRVTT': ['msrvtt_IRv2.hdf5'], 'Youtube2Text': ['msvd_IRv2.hdf5']}\ndim_acoustic = {'MSRVTT': 128, 'Youtube2Text': 128}\ninput_json_name = {'MSRVTT': 'videodatainfo', 'Youtube2Text': 'videodatainfo'}\ninfo_json_name = {'MSRVTT': 'info_pad_mask', 'Youtube2Text': 'info'}\ncaption_json_name = {'MSRVTT': 'caption_pad_mask', 'Youtube2Text': 'caption'}\n\n\nacoustic_post_name = ''\nif 'a' in args.m:\n acoustic_post_name = '_Av'\n if args.acoustic[0]:\n acoustic_feats_name['MSRVTT'].append('vtt_boaw%d.hdf5' % args.acoustic[0])\n acoustic_feats_name['Youtube2Text'].append('msvd_boaw%d.hdf5' % args.acoustic[0])\n dim_acoustic['MSRVTT'] += args.acoustic[0]\n dim_acoustic['Youtube2Text'] += args.acoustic[0]\n acoustic_post_name += 'b%d' % args.acoustic[0]\n\n if args.acoustic[1]:\n acoustic_feats_name['MSRVTT'].append('fvdb_%d.hdf5' % args.acoustic[1])\n acoustic_feats_name['Youtube2Text'].append('msvd_fvdb_%d.hdf5' % args.acoustic[1])\n dim_acoustic['MSRVTT'] += args.acoustic[1]\n dim_acoustic['Youtube2Text'] += args.acoustic[1]\n acoustic_post_name += 'f%d' % args.acoustic[1]\n\ndiscriminative_feats_dir = {'MSRVTT': '\\'\\'', 'Youtube2Text': '\\'\\''}\nnum_topics = 20\nltm_dir = '\\'\\''\n\nif 'c3d' in args.c3d_feats_name:\n #dim_c3d=2048\n dim_c3d = 4096 if 'fc6' in args.c3d_feats_name else 512\nelse:\n dim_c3d = 2048\n\nif args.option == 0:\n #dataset = 'MSRVTT'\n dataset = args.d\n modality_list = [args.m] * 1\n modality_zo_list = [args.mo] * 1\n modality_zi_list = [args.mi] * 1\n through_encoder_list = [[1, 1, 1, 1]] * 12\n mean_feats_list = [[0, 0, 0, 0]] * 12\n att_info_list = [[1, 0, 0, 0, 0]] * 12 \n \n if args.equally_sampling: rt = 'ES'\n else: rt = 'AR' if args.random_type == 'all_random' else 'SR'\n\n #prename = '%s_%s%s%s%d_wMA%d' % (rt, args.activation.upper(), args.activation_type, args.fusion_type.upper(), args.att_mid_size, 1 if args.with_modality_att else 0)\n prename = '%s%dwt%d_G%s_R%s%s%s%s%s' % (rt,args.n_frames, word_count_threshold[dataset], args.global_type, args.connect_type, ('_pE%s'%args.preEncoder_modality) if args.use_preEncoder else '', '_wise' if args.ss_wise else '', ('_SS%d'%args.ss_type) if args.scheduled_sampling else '', '_together' if args.together else '')\n if args.ss_type == 1 and args.scheduled_sampling:\n \tprename += '_%d_%d' % 
(args.ss_linear[0], int(100 * args.ss_linear[1]))\n scope_list = [ \n args.scope + acoustic_post_name, \n ]\n if args.d == 'Youtube2Text':\n \ttmp = ' --useS2ADRM --concat_before_att '\n else:\n \ttmp = ' --useS2ADRM --concat_before_att --use_ltm '\n scope_list = [prename+item for item in scope_list]\n info_list = [\n #' --useS2ADRM --use_preEncoder --preEncoder_type linear --dim_encoder_hiddenI 512 --dim_encoder_hiddenC 512 ',\n #' --useS2ADRM --scheduled_sampling --ss_k 70 ',\n tmp\n ]\n \n assert len(modality_list) == len(scope_list)\n assert len(scope_list) == len(info_list)\n\n\n#forget_bias = [(item / 10) for item in range(2, 21, 2)]\n#for fb in forget_bias:\n# args.forget_bias = fb\n\n#dim_encoder_hiddenC = [item for item in range(128, 1025, 128)]\n#for dc in dim_encoder_hiddenC:\n# args.dim_encoder_hiddenC = dc\n\n#nframes = [item for item in range(5, 21)]\n#for nf in nframes:\n# args.n_frames = nf\nprint('loop_limit: ', args.loop_limit)\nfor i in range(args.loop_limit):\n for j in range(len(info_list)):\n scope = scope_list[j]\n modality = modality_list[j]\n modality_zo = modality_zo_list[j]\n modality_zi = modality_zi_list[j]\n print(modality, modality_zo, modality_zi)\n if not modality: modality = '\\'\\''\n if not modality_zo: modality_zo = '\\'\\''\n if not modality_zi: modality_zi = '\\'\\''\n through_encoder = through_encoder_list[j]\n mean_feats = mean_feats_list[j]\n info = info_list[j]\n att_info = att_info_list[j]\n #######\n\n tmp = ' --first_evaluate_whole_folder ' if args.first_evaluate_whole_folder else ''\n paa = ' --usePAA ' if args.usePAA else ''\n if args.with_modality_att:\n tmp += ' --with_modality_att '\n if args.equally_sampling:\n tmp += ' --equally_sampling '\n if args.use_bn:\n tmp += ' --use_bn '\n if args.use_preEncoder:\n tmp += ' --use_preEncoder '\n if args.concat_before_att:\n tmp += ' --concat_before_att '\n if args.scheduled_sampling:\n tmp += ' --scheduled_sampling '\n if not args.no_DMHE_bn:\n tmp += ' --no_DMHE_bn '\n if args.together:\n tmp += ' --together '\n if args.no_save_best:\n tmp += ' --no_save_best '\n if args.cheat:\n tmp += ' --cheat '\n if args.bidirection:\n tmp += ' --bidirection '\n if args.twostream:\n tmp += ' --2stream '\n if args.use_MA:\n \ttmp += ' --use_MA '\n if args.ss_wise:\n tmp += ' --ss_wise '\n if args.baseline_addition:\n \ttmp += ' --baseline_addition '\n op = 'CUDA_VISIBLE_DEVICES='+ args.gpu\\\n +' python train.py '\\\n +' --scope ' + scope\\\n +' --dataset ' + dataset\\\n +' --batch_size ' + str(batch_size[dataset])\\\n +' --max_len ' + str(max_len[dataset])\\\n +' --word_count_threshold ' + str(word_count_threshold[dataset])\\\n +' --k_best_model ' + str(k_best_model)\\\n +' --save_model_limit ' + str(save_model_limit)\\\n +' --teacher_prob ' + str(teacher_prob)\\\n +' --learning_rate ' + str(learning_rate[dataset])\\\n +' --model DFM_Model '\\\n +' --modality ' + modality \\\n +' --through_encoder ' + ' '.join([str(k) for k in through_encoder]) \\\n +' --mean_feats ' + ' '.join([str(k) for k in mean_feats])\\\n +' --att_info ' + ' '.join([str(k) for k in att_info])\\\n +' --input_dropout_p ' + str(dropout_p[dataset])\\\n +' --output_dropout_p ' + str(dropout_p[dataset])\\\n +' --epochs ' + str(epochs[dataset])\\\n +' --dim_d ' + str(dim_d[dataset])\\\n +' --discriminative_feats_name ' + discriminative_feats_name[dataset]\\\n +' --acoustic_feats_name ' + ' '.join(acoustic_feats_name[dataset])\\\n +' --dim_acoustic ' + str(dim_acoustic[dataset])\\\n +' --loop_limit ' + str(i+1)\\\n +' --loop_start ' + 
str(i)\\\n +' --save_checkpoint_every ' + str(args.save_checkpoint_every)\\\n +' --stop_count ' + str(args.stop_count)\\\n +' --start_eval_epoch ' + str(args.start_eval_epoch)\\\n +' --train_type ' + str(train_type)\\\n +' --learning_rate_decay_rate ' + str(learning_rate_decay_rate)\\\n +' --learning_rate_decay_every ' + str(learning_rate_decay_every)\\\n +' --beam_size ' + str(beam_size)\\\n +' --discriminative_feats_dir ' + discriminative_feats_dir[dataset]\\\n +' --train_max_len ' + str(train_max_len[dataset])\\\n +' --checkpoint_path_name ' + checkpoint_path_name\\\n +' --model ' + model\\\n +' --ltm_dir '+ ltm_dir\\\n +' --num_topics '+ str(num_topics)\\\n +' --S2ADRM_type '+args.S2ADRM_type\\\n +' --load_pretrained ' + args.load_pretrained\\\n +' --c3d_feats_name ' + args.c3d_feats_name\\\n +' --dim_c3d ' + str(dim_c3d)\\\n +' --activation ' + args.activation\\\n +' --activation_type ' + args.activation_type\\\n +' --type_PAA ' + str(args.type_PAA)\\\n +' --merge_type ' + args.merge_type\\\n +' --random_type ' + args.random_type\\\n +' --att_mid_size ' + str(args.att_mid_size)\\\n +' --fusion_type ' + args.fusion_type\\\n +' --encoder_type ' + args.encoder_type\\\n +' --decoder_type ' + args.decoder_type\\\n +' --seed ' + str(args.seed)\\\n +' --modality_zo ' + modality_zo\\\n +' --modality_zi ' + modality_zi\\\n +' --all_level_modality ' + ' '.join([str(k) for k in args.all_level_modality])\\\n +' --n_frames ' + str(args.n_frames)\\\n +' --dim_guidance ' + str(args.dim_guidance)\\\n +' --guidance_type ' + args.guidance_type\\\n +' --feats_name ' + ' '.join(feats_name[dataset])\\\n +' --preEncoder_type ' + args.preEncoder_type\\\n +' --preEncoder_modality ' + args.preEncoder_modality\\\n +' --dim_encoder_hiddenC ' + str(args.dim_encoder_hiddenC)\\\n +' --ss_k ' + str(args.ss_k)\\\n +' --ss_type ' + str(args.ss_type)\\\n +' --ss_linear ' + ' '.join([str(k) for k in args.ss_linear])\\\n +' --ss_piecewise ' + ' '.join([str(k) for k in args.ss_piecewise])\\\n +' --input_json_name ' + input_json_name[dataset]\\\n +' --info_json_name ' + info_json_name[dataset]\\\n +' --caption_json_name ' + caption_json_name[dataset]\\\n +' --att_dropout ' + str(args.att_dropout)\\\n +' --forget_bias ' + str(args.forget_bias)\\\n +' --keyword ' + args.keyword\\\n +' --dim_encoder_hidden ' + str(args.dim_encoder_hidden)\\\n +' --grad_clip ' + str(args.grad_clip)\\\n +' --connect_type ' + args.connect_type\\\n +' --global_type ' + args.global_type\\\n +' --dim_global ' + str(args.dim_global)\\\n +' --category_type ' + str(args.category_type)\\\n + paa\\\n + tmp\\\n + info\n\n print(op)\n \n os.system(op)\n\n\n'''\npython train_pipe.py --m ica --acoustic 256 260 --gpu 0 --encoder_type gsru --decoder_type lstm \\\n--S2ADRM_type 2branch --c3d_feats_name msrvtt_c3d_60_fc6.hdf5 --concat_before_att --n_frames 10 \\\n--random_type segment_random --use_preEncoder --preEncoder_modality c --scope _RMSprop\n\npython train_pipe.py --m ica --acoustic 256 260 --gpu 1 --encoder_type gsru --decoder_type lstm \\\n--S2ADRM_type 2branch --c3d_feats_name msrvtt_c3d_60_fc6.hdf5 --concat_before_att --n_frames 10 \\\n--equally_sampling --use_preEncoder --preEncoder_modality c --scope _RMSprop\n'''","repo_name":"ybCliff/VideoCaptioning","sub_path":"train_pipe.py","file_name":"train_pipe.py","file_ext":"py","file_size_in_byte":15769,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"} +{"seq_id":"9889852815","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 13 15:40:04 
2022\n\n@author: werefkin\n\"\"\"\n\n\n\nimport numpy as np\nimport time as times\nimport sys\nfrom pyqtgraph.Qt import QtCore\nimport qtmodern.styles\nimport qtmodern.windows# import pyqtgraph as pg\n# from pyqtgraph.ptime import time\nfrom gui_design import Ui_Hypersen\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import QApplication\nfrom PyQt5.QtCore import pyqtSlot\n\n#########################################\n# main\n#########################################\n\nglobal fname, data, flag, mon, monitor, monitor_number\nfname = 'test.txt'\ndata = np.zeros(1024)\nflag = 1\n\n\n\n\n\nclass Window(QtWidgets.QMainWindow):\n def __init__(self):\n super().__init__()\n self.ui = Ui_Hypersen()\n self.ui.setupUi(self)\n self.show()\n \n #THREADS\n self.measurement = Measure(self)\n \n #PLOT\n self.ui.plotSignal.show()\n self.ui.p = self.ui.plotSignal.plot()\n self.ui.plotSignal.setLabel('left', 'Signal', units='au')\n\n #BUTTONS\n self.ui.startButton.clicked.connect(self.measurement.start)\n self.ui.startButton.clicked.connect(self.flag_zero)\n self.ui.stopButton.clicked.connect(self.flag_one)\n \n #TIMER\n self.timer = QtCore.QTimer()\n self.timer.timeout.connect(self.update)\n self.timer.start(100)\n \n #SIGNALS\n self.measurement.status.connect(self.addlogline)\n \n def update(self):\n self.displaydata = data[:]\n self.ui.p.setData(self.displaydata, name='Raw Signal')\n \n def flag_one(self):\n # do stuff\n global flag\n flag = 1\n \n def flag_zero(self):\n # do stuff\n global flag\n flag = 0\n \n def closeEvent(self, event):\n global flag\n flag=1\n times.sleep(0.5)\n event.accept()\n QApplication.quit()\n \n @pyqtSlot(object)\n def addlogline(self, status_text):\n self.ui.logs.append(status_text) \n \n\n\nclass Measure(QtCore.QThread):\n \n status = QtCore.pyqtSignal(object)\n\n def __init__(self, parent=None):\n QtCore.QThread.__init__(self, parent)\n\n def run(self):\n global data, flag\n \n self.ms_msg='Measurements started'\n self.status.emit(self.ms_msg)\n while flag == 0:\n #time.sleep(0.5)\n print('shape data: ', np.shape(data))\n print('flag: ', flag)\n data=np.random.rand(1500)\n\n\nif __name__ == '__main__':\n app = QtCore.QCoreApplication.instance()\n if app is None:\n app = QtWidgets.QApplication(sys.argv)\n qtmodern.styles.dark(app)\n w = Window()\n mw = qtmodern.windows.ModernWindow(w)\n mw.show()\n w.show()\n # sys.exit(app.exec_)\n sys.exit(app.exec_())\n\n# this is the end","repo_name":"werefkin/simple-python-gui-example","sub_path":"complex_case/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"25928144073","text":"from collections import namedtuple\nimport csv\nimport datetime\nimport decimal\nfrom io import StringIO\nimport typing\nfrom typing import Callable\nfrom typing import Iterable\n\nfrom pydantic.v1.main import BaseModel\nimport pytz\n\nimport pcapi.core.finance.api as finance_api\nimport pcapi.core.finance.repository as finance_repository\nimport pcapi.core.finance.utils as finance_utils\nfrom pcapi.core.offers.serialize import serialize_offer_type_educational_or_individual\nfrom pcapi.models.api_errors import ApiErrors\nfrom pcapi.utils.date import MONTHS_IN_FRENCH\nfrom pcapi.utils.date import utc_datetime_to_department_timezone\nfrom pcapi.utils.string import u_nbsp\n\n\ndef format_number_as_french(num: int | float) -> str:\n return str(num).replace(\".\", \",\")\n\n\ndef _build_full_address(street: str | None, postal_code: 
str | None, city: str | None) -> str:\n return \" \".join((street or \"\", postal_code or \"\", city or \"\"))\n\n\ndef _get_validation_period(cutoff: datetime.datetime) -> str:\n \"\"\"Indicate the 2-week period during which most(*) bookings have\n been validated that correspond with the requested cutoff.\n\n If the cutoff is 16/01: \"... janvier : 1ère quinzaine\".\n If the cutoff is 01/02: \"... janvier : 2nde quinzaine\".\n\n (*) most bookings, but not all. Some bookings may have been\n validated months ago, but reimbursed much later because there was\n no bank information yet or because the event had not yet occurred.\n \"\"\"\n # `cutoff` is the _exclusive_ upper bound of the period (i.e. the\n # first second of the day after the last included day).\n cutoff_day = pytz.utc.localize(cutoff).astimezone(finance_utils.ACCOUNTING_TIMEZONE).date()\n last_day = cutoff_day - datetime.timedelta(days=1)\n month = MONTHS_IN_FRENCH[last_day.month]\n if last_day.day == 15:\n fortnight = \"1ère quinzaine\"\n else:\n fortnight = \"2nde quinzaine\"\n return f\"Validées et remboursables sur {month} : {fortnight}\"\n\n\ndef _legacy_get_validation_period(transaction_label: str) -> str:\n \"\"\"Indicate the 2-week period during which most(*) bookings have been\n validated that correspond with the requested `Payment.transactionLabel`.\n\n Turn \"pass Culture Pro - remboursement 1ère quinzaine 06-2019\"\n into \"Validées et remboursables sur mai : 2nde quinzaine\".\n\n We don't want to show what's in `Payment.transactionLabel`,\n because it was unclear.\n \"\"\"\n fortnight, month_year = transaction_label.replace(\"pass Culture Pro - remboursement\", \"\").rsplit(\" \", 1)\n label_month_number = int(month_year.split(\"-\")[0])\n if \"1ère quinzaine\" in fortnight:\n fortnight = \"2nde quinzaine\"\n period_month = label_month_number - 1 if label_month_number > 1 else 11\n else:\n fortnight = \"1ère quinzaine\"\n period_month = label_month_number\n month_name = MONTHS_IN_FRENCH[int(period_month)]\n return f\"Validées et remboursables sur {month_name} : {fortnight}\"\n\n\nclass ReimbursementDetails:\n CSV_HEADER = [\n \"Réservations concernées par le remboursement\",\n \"Date du justificatif\",\n \"N° du justificatif\",\n \"N° de virement\",\n \"Point de remboursement\",\n \"Adresse du point de remboursement\",\n \"SIRET du point de remboursement\",\n \"IBAN\",\n \"Raison sociale du lieu\",\n \"Adresse du lieu\",\n \"SIRET du lieu\",\n \"Nom de l'offre\",\n \"N° de réservation (offre collective)\",\n \"Nom (offre collective)\",\n \"Prénom (offre collective)\",\n \"Nom de l'établissement (offre collective)\",\n \"Date de l'évènement (offre collective)\",\n \"Contremarque\",\n \"Date de validation de la réservation\",\n \"Intitulé du tarif\",\n \"Montant de la réservation\",\n \"Barème\",\n \"Montant remboursé\",\n \"Type d'offre\",\n ]\n\n # The argument is not a named tuple, but rather an SQLAlchemy\n # result object, but both are as opaque to mypy, which hence\n # reports \"attr-defined\" errors on almost every line. 
Instead of\n # polluting the code with dozens of \"ignore\" comments, disable\n # typing for the whole method.\n @typing.no_type_check\n def __init__(self, payment_info: namedtuple):\n # FIXME (dbaty, 2021-01-14): once we have created\n # pricing+cashflow data for pre-2022 payments, remove handling\n # of legacy Payment data from this function.\n using_legacy_models = hasattr(payment_info, \"transaction_label\")\n\n # Validation period\n if using_legacy_models:\n self.validation_period = _legacy_get_validation_period(payment_info.transaction_label)\n else:\n self.validation_period = _get_validation_period(payment_info.cashflow_batch_cutoff)\n\n # Invoice info\n if using_legacy_models:\n self.invoice_date = \"\"\n self.invoice_reference = \"\"\n self.cashflow_batch_label = \"\"\n else:\n self.invoice_date = payment_info.invoice_date\n self.invoice_reference = payment_info.invoice_reference\n self.cashflow_batch_label = payment_info.cashflow_batch_label\n\n # Venue info\n self.venue_name = payment_info.venue_name\n self.venue_common_name = payment_info.venue_common_name\n self.venue_address = _build_full_address(\n payment_info.venue_address,\n payment_info.venue_postal_code,\n payment_info.venue_city,\n )\n self.venue_siret = payment_info.venue_siret\n\n # Reimbursement point info + IBAN\n if using_legacy_models:\n self.reimbursement_point_common_name = self.venue_common_name\n self.reimbursement_point_siret = self.venue_siret\n self.reimbursement_point_address = self.venue_address\n else:\n self.reimbursement_point_common_name = payment_info.reimbursement_point_common_name\n self.reimbursement_point_siret = payment_info.reimbursement_point_siret\n self.reimbursement_point_address = _build_full_address(\n payment_info.reimbursement_point_address,\n payment_info.reimbursement_point_postal_code,\n payment_info.reimbursement_point_city,\n )\n self.iban = payment_info.iban\n\n # Offer, redactor and booking info\n self.offer_name = payment_info.offer_name\n self.booking_token = getattr(payment_info, \"booking_token\", None)\n self.booking_used_date = payment_info.booking_used_date\n self.booking_price_category_label = getattr(payment_info, \"booking_price_category_label\", None)\n self.booking_total_amount = format_number_as_french(\n payment_info.booking_amount * getattr(payment_info, \"booking_quantity\", 1)\n )\n\n # Collective offer specific fields\n self.redactor_last_name = getattr(payment_info, \"redactor_lastname\", \"\")\n self.redactor_first_name = getattr(payment_info, \"redactor_firstname\", \"\")\n self.event_date = getattr(payment_info, \"event_date\", \"\")\n self.institution_name = getattr(payment_info, \"institution_name\", \"\")\n venue_departement_code = getattr(payment_info, \"venue_departement_code\", None)\n if self.event_date and venue_departement_code:\n timezoned_event_date = utc_datetime_to_department_timezone(self.event_date, venue_departement_code)\n self.event_date = timezoned_event_date.strftime(\"%d/%m/%Y %H:%M\")\n\n # Reimbursement rate and amount\n if using_legacy_models:\n if payment_info.reimbursement_rate:\n rate = f\"{int(payment_info.reimbursement_rate * 100)}%\"\n else:\n rate = \"\"\n else: # using Pricing.standardRule or Pricing.customRule\n rule = finance_api.find_reimbursement_rule(payment_info.rule_name or payment_info.rule_id)\n if rule.rate:\n rate = decimal.Decimal(rule.rate * 100).quantize(decimal.Decimal(\"0.01\"))\n if rate == int(rate): # omit decimals if round number\n rate = int(rate)\n rate = format_number_as_french(rate) + f\"{u_nbsp}%\"\n 
else:\n rate = \"\"\n self.reimbursement_rate = rate\n if using_legacy_models:\n self.reimbursed_amount = format_number_as_french(payment_info.amount)\n else:\n self.reimbursed_amount = format_number_as_french(finance_utils.to_euros(payment_info.amount))\n\n self.collective_booking_id = payment_info.collective_booking_id or \"\"\n\n # Offer type\n self.offer_type = serialize_offer_type_educational_or_individual(\n offer_is_educational=payment_info.collective_booking_id is not None\n )\n\n @typing.no_type_check # see comment for `__init__()` above\n def as_csv_row(self) -> list:\n return [\n self.validation_period,\n self.invoice_date,\n self.invoice_reference,\n self.cashflow_batch_label,\n self.reimbursement_point_common_name,\n self.reimbursement_point_address,\n self.reimbursement_point_siret,\n self.iban,\n self.venue_name,\n self.venue_address,\n self.venue_siret,\n self.offer_name,\n self.collective_booking_id,\n self.redactor_last_name,\n self.redactor_first_name,\n self.institution_name,\n self.event_date,\n self.booking_token,\n self.booking_used_date,\n self.booking_price_category_label,\n self.booking_total_amount,\n self.reimbursement_rate,\n self.reimbursed_amount,\n self.offer_type,\n ]\n\n\ndef generate_reimbursement_details_csv(reimbursement_details: Iterable[ReimbursementDetails]) -> str:\n output = StringIO()\n csv_lines = [reimbursement_detail.as_csv_row() for reimbursement_detail in reimbursement_details]\n writer = csv.writer(output, dialect=csv.excel, delimiter=\";\", quoting=csv.QUOTE_NONNUMERIC)\n writer.writerow(ReimbursementDetails.CSV_HEADER)\n writer.writerows(csv_lines)\n return output.getvalue()\n\n\ndef find_all_offerer_reimbursement_details(\n offerer_id: int,\n reimbursements_period: tuple[datetime.date | None, datetime.date | None],\n venue_id: int | None = None,\n) -> list[ReimbursementDetails]:\n return find_all_offerers_reimbursement_details(\n [offerer_id],\n reimbursements_period,\n venue_id=venue_id,\n )\n\n\ndef find_all_offerers_reimbursement_details(\n offerer_ids: list[int],\n reimbursements_period: tuple[datetime.date | None, datetime.date | None],\n venue_id: int | None = None,\n) -> list[ReimbursementDetails]:\n offerer_payments = finance_repository.find_all_offerers_payments(offerer_ids, reimbursements_period, venue_id) # type: ignore [arg-type]\n reimbursement_details = [ReimbursementDetails(offerer_payment) for offerer_payment in offerer_payments]\n\n return reimbursement_details\n\n\ndef validate_reimbursement_period(\n reimbursement_period_field_names: tuple[str, str], get_query_param: Callable\n) -> list[None] | list[datetime.date]:\n api_errors = ApiErrors()\n reimbursement_period_dates = []\n for field_name in reimbursement_period_field_names:\n try:\n reimbursement_period_dates.append(datetime.date.fromisoformat(get_query_param(field_name)))\n except (TypeError, ValueError):\n api_errors.add_error(field_name, \"Vous devez renseigner une date au format ISO (ex. 
2021-12-24)")\n if len(api_errors.errors) > 0:\n raise api_errors\n return reimbursement_period_dates or [None, None] # type: ignore [list-item]\n\n\nclass ReimbursementCsvQueryModel(BaseModel):\n venueId: int | None\n reimbursementPeriodBeginningDate: str | None\n reimbursementPeriodEndingDate: str | None\n","repo_name":"pass-culture/pass-culture-main","sub_path":"api/src/pcapi/routes/serialization/reimbursement_csv_serialize.py","file_name":"reimbursement_csv_serialize.py","file_ext":"py","file_size_in_byte":11852,"program_lang":"python","lang":"en","doc_type":"code","stars":86,"dataset":"github-code","pt":"29"} +{"seq_id":"36040566831","text":"# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nimport itertools\r\nimport argparse\r\n\r\nimport paddlers\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom easydict import EasyDict as edict\r\n\r\nfrom utils import Raster, time_it\r\n\r\n\r\ndef _calcOIF(rgb, stds, rho):\r\n r, g, b = rgb\r\n s1 = stds[int(r)]\r\n s2 = stds[int(g)]\r\n s3 = stds[int(b)]\r\n r12 = rho[int(r), int(g)]\r\n r23 = rho[int(g), int(b)]\r\n r31 = rho[int(b), int(r)]\r\n return (s1 + s2 + s3) / (abs(r12) + abs(r23) + abs(r31))\r\n\r\n\r\n@time_it\r\ndef oif(image_path, topk=5):\r\n raster = Raster(image_path)\r\n img = raster.getArray()\r\n img_flatten = img.reshape([-1, raster.bands])\r\n stds = np.std(img_flatten, axis=0)\r\n datas = edict()\r\n for c in range(raster.bands):\r\n datas[str(c + 1)] = img_flatten[:, c]\r\n datas = pd.DataFrame(datas)\r\n rho = datas.corr().values\r\n band_combs = edict()\r\n for rgb in itertools.combinations(list(range(raster.bands)), 3):\r\n band_combs[str(rgb)] = _calcOIF(rgb, stds, rho)\r\n band_combs = sorted(\r\n band_combs.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)\r\n print(\"== Optimal band combination ==\")\r\n for i in range(topk):\r\n k, v = band_combs[i]\r\n print(\"Bands: {0}, OIF value: {1}.\".format(k, v))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--image_path\", type=str, required=True, \\\r\n help=\"Path of HSIs image.\")\r\n parser.add_argument(\"--topk\", type=int, default=5, \\\r\n help=\"Number of top results. 
The default value is 5.\")\r\n args = parser.parse_args()\r\n oif(args.image_path, args.topk)\r\n","repo_name":"PaddlePaddle/PaddleRS","sub_path":"tools/oif.py","file_name":"oif.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","stars":300,"dataset":"github-code","pt":"29"} +{"seq_id":"27678771771","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nDemo of MovieStim\n\nMovieStim opens a video file and displays it on a window.\n\n\"\"\"\nfrom psychopy import visual, core, event, constants\n\n# window to present the video\nwin = visual.Window((800, 600), fullscr=False)\n\n# create a new movie stimulus instance\nmov = visual.MovieStim(\n win,\n 'default.mp4', # path to video file\n size=(256, 256),\n flipVert=False,\n flipHoriz=False,\n loop=True,\n noAudio=False,\n volume=0.1,\n autoStart=False)\n\n# print some information about the movie\nprint('orig movie size={}'.format(mov.frameSize))\nprint('orig movie duration={}'.format(mov.duration))\n\n# instructions\ninstrText = \"`s` Start/Resume\\n`p` Pause\\n`r` Restart\\n`q` Stop and Close\"\ninstr = visual.TextStim(win, instrText, pos=(0.0, -0.75))\n\n# main loop\nwhile mov.status != constants.FINISHED:\n # draw the movie\n mov.draw()\n # draw the instruction text\n instr.draw()\n # flip buffers so they appear on the window\n win.flip()\n\n # process keyboard input\n if event.getKeys('q'): # quit\n break\n elif event.getKeys('s'): # play/start\n mov.play()\n elif event.getKeys('p'): # pause\n mov.pause()\n elif event.getKeys('r'): # restart/replay\n mov.replay()\n elif event.getKeys('m'): # volume up 5%\n mov.volumeUp()\n elif event.getKeys('n'): # volume down 5%\n mov.volumeDown()\n\n# stop the movie, this frees resources too\nmov.stop()\n\n# clean up and exit\nwin.close()\ncore.quit()\n\n# The contents of this file are in the public domain.\n","repo_name":"zhikun-hou/focus-experiment","sub_path":"src/PsychoPy/Lib/site-packages/psychopy/demos/coder/stimuli/MovieStim.py","file_name":"MovieStim.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"10714900184","text":"try:\n from typing import Union, Tuple, Optional\nexcept ImportError:\n pass\n\nfrom bitmaptools import draw_polygon, draw_line\nimport displayio\nimport terminalio\nfrom vectorio import Polygon\nfrom ulab import numpy as np\n\n__version__ = \"0.0.0+auto.0\"\n__repo__ = \"https://github.com/jposada202020/CircuitPython_Gauge.git\"\n\n\nclass gauge(displayio.Group):\n \"\"\"\n scales Class to add different elements to the screen.\n The origin point set by ``x`` and ``y`` properties\n\n :param int x: origin x coordinate. Defaults to 0.\n :param int y: origin y coordinate. Defaults to 0.\n :param int width: plot box width in pixels. Defaults to 100.\n :param int length: plot box height in pixels. Defaults to 100.\n :param int padding: padding for the scale box in all directions\n :param list|None scale_range: x range limits. Defaults to None\n\n :param int background_color: background color in HEX. Defaults to black ``0x000000``\n :param int box_color: allows to choose the box line color. Defaults to white ''0xFFFFFF``\n\n :param np.array|list ticks: axis ticks values\n :param int tick_lenght: x axes tick height in pixels. Defaults to 28.\n :param int tick_color: x axes tick height in pixels. Defaults to 0xFFFFFF.\n :param str|None tick_pos: Argument to locate the ticks. 
Left, center or all\n :param int pointer_lenght: width of the bar. Defaults to 10.\n :param int scale: scale of the widget\n\n :param str direction: direction of the scale either :attr:`horizontal` or :attr:`Vertical`\n defaults to :attr:`Vertical`\n\n \"\"\"\n\n def __init__(\n self,\n x: int = 0,\n y: int = 0,\n width: int = 100,\n length: int = 100,\n padding: int = 0,\n scale_range: Optional[list] = None,\n background_color: int = 0x000000,\n box_color: int = 0xFF8500,\n ticks: Optional[Union[np.array, list]] = None,\n tick_lenght: int = 28,\n tick_color: int = 0xFFFFFF,\n tick_color_threshold: int = 0xFF0000,\n tick_pos: Optional[str] = None,\n pointer_lenght: int = 10,\n scale: int = 1,\n show_text: bool = False,\n text_format: Optional[str] = None,\n direction: str = \"Vertical\",\n ) -> None:\n super().__init__(x=x, y=y, scale=scale)\n self.padding = padding\n if direction == \"Vertical\":\n self.direction = True\n self._width = width\n self._length = length\n self._newvaluemin = self._length - self.padding\n self._newvaluemax = self.padding\n self._newxmin = self.padding\n self._newxmax = self._width - self.padding\n else:\n self.direction = False\n self._width = length\n self._length = width\n self._newvaluemin = self.padding\n self._newvaluemax = self._width - self.padding\n self._newxmin = self.padding\n self._newxmax = self._length - self.padding\n\n self._plotbitmap = displayio.Bitmap(self._width + 1, self._length + 1, 6)\n\n # Box Points\n self._boxpos_x0 = self.padding\n self._boxpos_y0 = self.padding\n self._boxpos_x1 = self._width - self.padding\n self._boxpos_y1 = self._length - self.padding\n\n self._valuemin = scale_range[0]\n self._valuemax = scale_range[1]\n\n if ticks:\n self.ticks = np.array(ticks)\n else:\n self.ticks = np.array(list(range(self._valuemin, self._valuemax, 10)))\n\n self._showtext = show_text\n\n self._tickcolor = tick_color\n self._tick_color_threshold = tick_color_threshold\n self._pointer_palette = displayio.Palette(3)\n self._pointer_palette.make_transparent(0)\n self._pointer_palette[1] = self._tickcolor\n self._pointer_palette[2] = self._tick_color_threshold\n self.pointer = None\n self._pointer_length = pointer_lenght\n self._tick_length = tick_lenght\n\n self.value = 0\n self.threshold = 0\n\n self._showticks = True\n\n if text_format == \"float\":\n self._text_format = True\n else:\n self._text_format = False\n\n self._plot_palette = displayio.Palette(6)\n self._plot_palette[0] = background_color\n self._plot_palette[1] = box_color\n self._plot_palette[2] = self._tickcolor\n\n self.points = None\n\n if self.direction:\n self._center = (self._newxmax - self._newxmin) // 2\n self.x0 = self._center - self._pointer_length // 2 + 2 * self.padding\n self.y0 = self._newvaluemin\n self.x1 = self._center + self._pointer_length // 2\n self.y1 = self._newvaluemin - self.value\n else:\n self._center = (self._newxmax - self._newxmin) // 2\n self.x0 = self._boxpos_x0 + self.padding\n self.y0 = self._center - self._pointer_length // 2\n self.x1 = self._boxpos_x0 + self.value\n self.y1 = self._center + self._pointer_length // 2\n\n if tick_pos == \"left\":\n self._tickpos = self._newxmin\n elif tick_pos == \"center\":\n self._tickpos = self._center - self._tick_length // 2\n else:\n self._tickpos = self._newxmin\n self._tick_length = self._width\n\n self.append(\n displayio.TileGrid(\n self._plotbitmap, pixel_shader=self._plot_palette, x=0, y=0\n )\n )\n self._draw_ticks()\n self._draw_pointer()\n self._drawbox()\n\n def _drawbox(self) -> None:\n \"\"\"\n 
Draw the plot box\n\n :return: None\n\n \"\"\"\n xs = bytes([self._boxpos_x0, self._boxpos_x0, self._boxpos_x1, self._boxpos_x1])\n ys = bytes([self._boxpos_y0, self._boxpos_y1, self._boxpos_y1, self._boxpos_y0])\n draw_polygon(self._plotbitmap, xs, ys, 1)\n\n @staticmethod\n def transform(\n oldrangemin: Union[float, int],\n oldrangemax: Union[float, int],\n newrangemin: Union[float, int],\n newrangemax: Union[float, int],\n value: Union[float, int],\n ) -> Union[float, int]:\n \"\"\"\n This function converts the original value into a new defined value in the new range\n\n :param int|float oldrangemin: minimum of the original range\n :param int|float oldrangemax: maximum of the original range\n :param int|float newrangemin: minimum of the new range\n :param int|float newrangemax: maximum of the new range\n :param int|float value: value to be converted\n\n :return int|float: converted value\n\n \"\"\"\n\n return (\n ((value - oldrangemin) * (newrangemax - newrangemin))\n / (oldrangemax - oldrangemin)\n ) + newrangemin\n\n def _draw_ticks(self) -> None:\n \"\"\"\n Draw ticks in the plot area\n\n \"\"\"\n\n ticksnorm = np.array(\n self.transform(\n self._valuemin,\n self._valuemax,\n self._newvaluemin,\n self._newvaluemax,\n self.ticks,\n ),\n dtype=np.int16,\n )\n if self.direction:\n for i, tick in enumerate(ticksnorm):\n draw_line(\n self._plotbitmap,\n self._newxmin + self._tickpos,\n tick,\n self._newxmin + self._tick_length + self._tickpos,\n tick,\n 2,\n )\n if self._showtext:\n if self._text_format:\n self.show_text(\n \"{:.2f}\".format(self.ticks[i]),\n self._newxmin,\n tick,\n (1.0, 0.5),\n )\n else:\n self.show_text(\n \"{:d}\".format(int(self.ticks[i])),\n self._newxmin,\n tick,\n (1.0, 0.5),\n )\n else:\n for i, tick in enumerate(ticksnorm):\n draw_line(\n self._plotbitmap,\n self._boxpos_x0 + tick,\n self._boxpos_y0 + self._tickpos,\n self._boxpos_x0 + tick,\n self._boxpos_y0 + self._tick_length + self._tickpos,\n 2,\n )\n if self._showtext:\n if self._text_format:\n self.show_text(\n \"{:.2f}\".format(self.ticks[i]),\n tick,\n self._boxpos_y1,\n (0.5, 0.0),\n )\n else:\n self.show_text(\n \"{:d}\".format(int(self.ticks[i])),\n tick,\n self._boxpos_y1,\n (0.5, 0.0),\n )\n\n def show_text(\n self, text: str, x: int, y: int, anchorpoint: Tuple = (0.5, 0.0)\n ) -> None:\n \"\"\"\n\n Show desired text in the screen\n :param str text: text to be displayed\n :param int x: x coordinate\n :param int y: y coordinate\n :param Tuple anchorpoint: Display_text anchor point. 
Defaults to (0.5, 0.0)\n :return: None\n \"\"\"\n if self._showtext:\n from adafruit_display_text import bitmap_label\n\n text_toplot = bitmap_label.Label(terminalio.FONT, text=text, x=x, y=y)\n text_toplot.anchor_point = anchorpoint\n text_toplot.anchored_position = (x, y)\n self.append(text_toplot)\n\n def _draw_pointer(self):\n self.points = [\n (self.x0, self.y0),\n (self.x1, self.y0),\n (self.x1, self.y1),\n (self.x0, self.y1),\n ]\n\n self.pointer = Polygon(\n pixel_shader=self._pointer_palette,\n points=self.points,\n x=0,\n y=0,\n color_index=1,\n )\n self.append(self.pointer)\n\n def update(self, new_value):\n \"\"\"\n Function to update gauge value\n\n :param new_value: value to be updated\n :return: None\n\n \"\"\"\n\n if self.direction:\n self.value = int(\n self.transform(\n self._valuemin,\n self._valuemax,\n self._newvaluemax,\n self._newvaluemin,\n new_value,\n )\n )\n if self.value >= self._newvaluemin:\n self.value = self._newvaluemin\n self.y1 = self._newvaluemin - self.value\n else:\n self.value = int(\n self.transform(\n self._valuemin,\n self._valuemax,\n self._newvaluemin,\n self._newvaluemax,\n new_value,\n )\n )\n self.x1 = self._newvaluemin + self.value\n self.points = [\n (self.x0, self.y0),\n (self.x1, self.y0),\n (self.x1, self.y1),\n (self.x0, self.y1),\n ]\n\n if self.value > self.threshold:\n self.pointer.color_index = 2\n else:\n self.pointer.color_index = 1\n self.pointer.points = self.points\n\n def set_threshold(self, value: int, color: int = 0xFF0000) -> None:\n \"\"\"\n Defines the threshold for the gage to change color\n :param value: value that will trigger the change\n :param color: color to change into. Defaults to red :const:`0xFF0000`\n :return: None\n \"\"\"\n self._pointer_palette[2] = color\n self.threshold = value\n","repo_name":"jposada202020/CircuitPython_gauge","sub_path":"gauge.py","file_name":"gauge.py","file_ext":"py","file_size_in_byte":11848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"28869629481","text":"import pytest\n\nfrom cassandra.cluster import Cluster, NoHostAvailable\nfrom cassandra.connection import DRIVER_NAME, DRIVER_VERSION\nimport json\nimport os\nimport ssl\nimport subprocess\nimport tempfile\nimport time\nimport random\n\nfrom util import unique_name, new_test_table, cql_session, local_process_id\n\n\nprint(f\"Driver name {DRIVER_NAME}, version {DRIVER_VERSION}\")\n\n\n# By default, tests run against a CQL server (Scylla or Cassandra) listening\n# on localhost:9042. 
Add the --host and --port options to allow overiding\n# these defaults.\ndef pytest_addoption(parser):\n parser.addoption('--host', action='store', default='localhost',\n help='CQL server host to connect to')\n parser.addoption('--port', action='store', default='9042',\n help='CQL server port to connect to')\n parser.addoption('--ssl', action='store_true',\n help='Connect to CQL via an encrypted TLSv1.2 connection')\n # Used by the wrapper script only, not by pytest, added here so it appears\n # in --help output and so that pytest's argparser won't protest against its\n # presence.\n parser.addoption('--omit-scylla-output', action='store_true',\n help='Omit scylla\\'s output from the test output')\n\n# \"cql\" fixture: set up client object for communicating with the CQL API.\n# The host/port combination of the server are determined by the --host and\n# --port options, and defaults to localhost and 9042, respectively.\n# We use scope=\"session\" so that all tests will reuse the same client object.\n@pytest.fixture(scope=\"session\")\ndef cql(request):\n try:\n # Use the default superuser credentials, which work for both Scylla and Cassandra\n with cql_session(request.config.getoption('host'),\n request.config.getoption('port'),\n request.config.getoption('ssl'),\n username=\"cassandra\",\n password=\"cassandra\"\n ) as session:\n yield session\n session.shutdown()\n except NoHostAvailable:\n # We couldn't create a cql connection. Instead of reporting that\n # each individual test failed, let's just exit immediately.\n pytest.exit(f\"Cannot connect to Scylla at --host={request.config.getoption('host')} --port={request.config.getoption('port')}\", returncode=pytest.ExitCode.INTERNAL_ERROR)\n\n# A function-scoped autouse=True fixture allows us to test after every test\n# that the CQL connection is still alive - and if not report the test which\n# crashed Scylla and stop running any more tests.\n@pytest.fixture(scope=\"function\", autouse=True)\ndef cql_test_connection(cql, request):\n yield\n try:\n # We want to run a do-nothing CQL command. \n # \"BEGIN BATCH APPLY BATCH\" is the closest to do-nothing I could find...\n cql.execute(\"BEGIN BATCH APPLY BATCH\")\n except:\n pytest.exit(f\"Scylla appears to have crashed in test {request.node.parent.name}::{request.node.name}\")\n\n# Until Cassandra 4, NetworkTopologyStrategy did not support the option\n# replication_factor (https://issues.apache.org/jira/browse/CASSANDRA-14303).\n# We want to allow these tests to run on Cassandra 3.* (for the convenience\n# of developers who happen to have it installed), so we'll use the older\n# syntax that needs to specify a DC name explicitly. For this, will have\n# a \"this_dc\" fixture to figure out the name of the current DC, so it can be\n# used in NetworkTopologyStrategy.\n@pytest.fixture(scope=\"session\")\ndef this_dc(cql):\n yield cql.execute(\"SELECT data_center FROM system.local\").one()[0]\n\n# \"test_keyspace\" fixture: Creates and returns a temporary keyspace to be\n# used in tests that need a keyspace. The keyspace is created with RF=1,\n# and automatically deleted at the end. 
We use scope=\"session\" so that all\n# tests will reuse the same keyspace.\n@pytest.fixture(scope=\"session\")\ndef test_keyspace(cql, this_dc):\n name = unique_name()\n cql.execute(\"CREATE KEYSPACE \" + name + \" WITH REPLICATION = { 'class' : 'NetworkTopologyStrategy', '\" + this_dc + \"' : 1 }\")\n yield name\n cql.execute(\"DROP KEYSPACE \" + name)\n\n# The \"scylla_only\" fixture can be used by tests for Scylla-only features,\n# which do not exist on Apache Cassandra. A test using this fixture will be\n# skipped if running with \"run-cassandra\".\n@pytest.fixture(scope=\"session\")\ndef scylla_only(cql):\n # We recognize Scylla by checking if there is any system table whose name\n # contains the word \"scylla\":\n names = [row.table_name for row in cql.execute(\"SELECT * FROM system_schema.tables WHERE keyspace_name = 'system'\")]\n if not any('scylla' in name for name in names):\n pytest.skip('Scylla-only test skipped')\n\n# \"cassandra_bug\" is similar to \"scylla_only\", except instead of skipping\n# the test, it is expected to fail (xfail) on Cassandra. It should be used\n# in rare cases where we consider Scylla's behavior to be the correct one,\n# and Cassandra's to be the bug.\n@pytest.fixture(scope=\"session\")\ndef cassandra_bug(cql):\n # We recognize Scylla by checking if there is any system table whose name\n # contains the word \"scylla\":\n names = [row.table_name for row in cql.execute(\"SELECT * FROM system_schema.tables WHERE keyspace_name = 'system'\")]\n if not any('scylla' in name for name in names):\n pytest.xfail('A known Cassandra bug')\n\n# Consistent schema change feature is optionally enabled and\n# some tests are expected to fail on Scylla without this\n# option enabled, and pass with it enabled (and also pass on Cassandra).\n# These tests should use the \"fails_without_consistent_cluster_management\"\n# fixture. When consistent mode becomes the default, this fixture can be removed.\n@pytest.fixture(scope=\"function\")\ndef check_pre_consistent_cluster_management(cql):\n # If not running on Scylla, return false.\n names = [row.table_name for row in cql.execute(\"SELECT * FROM system_schema.tables WHERE keyspace_name = 'system'\")]\n if not any('scylla' in name for name in names):\n return False\n # In Scylla, we check Raft mode by inspecting the configuration via CQL.\n consistent = list(cql.execute(\"SELECT value FROM system.config WHERE name = 'consistent_cluster_management'\"))\n return len(consistent) == 0 or consistent[0].value == \"false\"\n\n\n@pytest.fixture(scope=\"function\")\ndef fails_without_consistent_cluster_management(request, check_pre_consistent_cluster_management):\n if check_pre_consistent_cluster_management:\n request.node.add_marker(pytest.mark.xfail(reason=\"Test expected to fail without consistent cluster management \"\n \"feature on\"))\n# Older versions of the Cassandra driver had a bug where if Scylla returns\n# an empty page, the driver would immediately stop reading even if this was\n# not the last page. Some tests which filter out most of the results can end\n# up with some empty pages, and break on buggy versions of the driver. These\n# tests should be skipped when using a buggy version of the driver. 
This is\n# the purpose of the following fixture.\n# This driver bug was fixed in Scylla driver 3.24.5 and Datastax driver\n# 3.25.1, in the following commits:\n# https://github.com/scylladb/python-driver/commit/6ed53d9f7004177e18d9f2ea000a7d159ff9278e,\n# https://github.com/datastax/python-driver/commit/1d9077d3f4c937929acc14f45c7693e76dde39a9\n@pytest.fixture(scope=\"function\")\ndef driver_bug_1():\n scylla_driver = 'Scylla' in DRIVER_NAME\n driver_version = tuple(int(x) for x in DRIVER_VERSION.split('.'))\n if (scylla_driver and driver_version < (3, 24, 5) or\n not scylla_driver and driver_version <= (3, 25, 0)):\n pytest.skip(\"Python driver too old to run this test\")\n\n# `random_seed` fixture should be used when the test uses random module.\n# If the fixture is used, the seed is visible in case of test's failure,\n# so it can be easily recreated.\n# The state of random module is restored to before-test state after the test finishes.\n@pytest.fixture(scope=\"function\")\ndef random_seed():\n state = random.getstate()\n seed = time.time()\n print(f\"Using seed {seed}\")\n random.seed(seed)\n yield seed\n random.setstate(state)\n\n# TODO: use new_test_table and \"yield from\" to make shared test_table\n# fixtures with some common schemas.\n\n# To run the Scylla tools, we need to run Scylla executable itself, so we\n# need to find the path of the executable that was used to run Scylla for\n# this test. We do this by trying to find a local process which is listening\n# to the address and port to which our our CQL connection is connected.\n# If such a process exists, we verify that it is Scylla, and return the\n# executable's path. If we can't find the Scylla executable we use\n# pytest.skip() to skip tests relying on this executable.\n@pytest.fixture(scope=\"session\")\ndef scylla_path(cql):\n pid = local_process_id(cql)\n if not pid:\n pytest.skip(\"Can't find local Scylla process\")\n # Now that we know the process id, use /proc to find the executable.\n try:\n path = os.readlink(f'/proc/{pid}/exe')\n except:\n pytest.skip(\"Can't find local Scylla executable\")\n # Confirm that this executable is a real tool-providing Scylla by trying\n # to run it with the \"--list-tools\" option\n try:\n subprocess.check_output([path, '--list-tools'])\n except:\n pytest.skip(\"Local server isn't Scylla\")\n return path\n\n# A fixture for finding Scylla's data directory. We get it using the CQL\n# interface to Scylla's configuration. Note that if the server is remote,\n# the directory retrieved this way may be irrelevant, whether or not it\n# exists on the local machine... However, if the same test that uses this\n# fixture also uses the scylla_path fixture, the test will anyway be skipped\n# if the running Scylla is not on the local machine local.\n@pytest.fixture(scope=\"module\")\ndef scylla_data_dir(cql):\n try:\n dir = json.loads(cql.execute(\"SELECT value FROM system.config WHERE name = 'data_file_directories'\").one().value)[0]\n return dir\n except:\n pytest.skip(\"Can't find Scylla sstable directory\")\n\n@pytest.fixture(scope=\"function\")\ndef temp_workdir():\n \"\"\" Creates a temporary work directory, for the scope of a single test. 
\"\"\"\n with tempfile.TemporaryDirectory() as workdir:\n yield workdir\n","repo_name":"scylladb/scylladb","sub_path":"test/cql-pytest/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":10270,"program_lang":"python","lang":"en","doc_type":"code","stars":11533,"dataset":"github-code","pt":"29"} +{"seq_id":"25477827790","text":"import time\nimport base64\nimport json\nimport uuid\nimport os\nimport configparser\nfrom collections import OrderedDict\nfrom enum import Enum\nfrom functools import wraps\nfrom typing import List\n\nimport jwt\nimport requests\nfrom flask import Flask, abort, jsonify, make_response, request\nfrom jwt import PyJWKClient\n\napp = Flask(__name__)\n\nconfig = configparser.ConfigParser()\nconfig.read('resources/config.ini')\nORG_NAME = config['ASGARDEO']['ORGANIZATION']\nCUSTOMER_GROUP_ID = config['ASGARDEO']['CUSTOMER_GROUP_ID']\n\nasgardeo_public_key = None\nJWKS_URL = f\"https://api.asgardeo.io/t/{ORG_NAME}/oauth2/jwks\"\nAUD = config['ASGARDEO']['AUDIENCE']\nADMIN_CLIENT_ID = config['ASGARDEO']['ADMIN_CLIENT_ID']\nADMIN_CLIENT_SECRET = os.getenv(\"ADMIN_CLIENT_SECRET\")\nACCESS_TOKEN = {}\n\n\n# Device model\nclass Device:\n def __init__(self, device_id, name, image_uri, qty, description, price, promo_id_list=None):\n self.device_id = device_id\n self.name = name\n self.image_uri = image_uri\n self.qty = qty\n self.description = description\n self.price = price\n self.promo_id_list = promo_id_list\n\n\nclass DeviceEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, Device):\n return obj.__dict__\n elif isinstance(obj, list):\n return [item.__dict__ for item in obj if item is not None]\n return json.JSONEncoder.default(self, obj)\n\n\nclass Tier(Enum):\n NoTier = 0\n Silver = 1\n Gold = 2\n Platinum = 3\n\n\n# Promotion model\nclass Promotion:\n\n def __init__(self, promo_id, promo_code, discount, tier_list=None):\n self.promo_id = promo_id\n self.promo_code = promo_code\n self.discount = discount\n self.tier_list = tier_list\n\n def get_tier_enums(self: List[str]) -> List[Tier]:\n return [Tier[tier] for tier in self]\n\n\nclass PromotionEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, Promotion):\n data = {\n 'promo_id': obj.promo_id,\n 'promo_code': obj.promo_code,\n 'discount': obj.discount,\n 'tier_list': [tier.name for tier in obj.tier_list] if obj.tier_list else []\n }\n return data\n return super().default(obj)\n\n\ndevices = OrderedDict({\n \"c9912c06-0a57-4812-89cb-8322c90fb3e5\": Device(\"c9912c06-0a57-4812-89cb-8322c90fb3e5\", 'iPhone 14 Pro Max',\n 'https://www.dialog.lk/dialogdocroot/content/images/devices/iphone14pro-deeppurple.png',\n 15, 'Description 1', 100, [1, 2]),\n \"d4e2c72a-1785-454b-ae90-4796859f85d4\": Device(\"d4e2c72a-1785-454b-ae90-4796859f85d4\", 'Samsung Galaxy S22 Ultra',\n 'https://www.dialog.lk/dialogdocroot/content/images/devices/samsung-galaxy-ultra-black-med.jpg',\n 5, 'Description 2', 200, [2, 3]),\n \"8c4dd076-e817-4969-a4fa-e33a28023d83\": Device(\"8c4dd076-e817-4969-a4fa-e33a28023d83\", 'Google Pixel 7 Pro',\n 'https://fdn2.gsmarena.com/vv/bigpic/google-pixel7-pro-new.jpg',\n 8, 'Description 3', 200)\n})\n\npromotions = [\n Promotion(1, 'PROMO1', 10, [Tier.Silver]),\n Promotion(2, 'PROMO2', 20, [Tier.Gold]),\n Promotion(3, 'PROMO3', 30, [Tier.Platinum])\n]\n\ncustomers = {}\n\n\n# Get device by ID\ndef get_device(device_id):\n return devices.get(device_id)\n\n\n# Get promotion by ID\ndef get_promotion(promo_id):\n for promotion in promotions:\n if 
promotion.promo_id == promo_id:\n return promotion\n return None\n\n\n# Get customer by ID\ndef get_customer(customer_id):\n return customers.get(customer_id)\n\n\ndef get_unauthorized_response(message=None):\n if message:\n return make_response(jsonify(message=message), 401)\n\n return make_response(jsonify(message=f\"Unauthorized\"), 401)\n\n\n# Define a custom Flask decorator for JWT authentication\ndef requires_auth(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n # Get JWT access token from the Authorization header\n authz_header = request.headers.get('Authorization')\n if not authz_header:\n abort(get_unauthorized_response())\n\n if len(authz_header.split()) != 2:\n abort(get_unauthorized_response())\n\n token = authz_header.split()[1]\n if not token:\n abort(get_unauthorized_response()) # Unauthorized\n\n try:\n public_key = get_public_key(token)\n # Decode JWT access token and verify signature using the public key\n jwt.decode(token, public_key, algorithms=['RS256'], audience=AUD, verify=True)\n except:\n abort(401) # Unauthorized\n\n return f(*args, **kwargs)\n\n return decorated\n\n\ndef get_public_key(token):\n global asgardeo_public_key\n if asgardeo_public_key is not None:\n return asgardeo_public_key\n\n jwks_client = PyJWKClient(JWKS_URL)\n signing_key = jwks_client.get_signing_key_from_jwt(token)\n asgardeo_public_key = signing_key.key\n return asgardeo_public_key\n\n\ndef authorize(required_scopes):\n def decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n token = request.headers.get('Authorization').split()[1]\n if not token:\n abort(401) # Unauthorized\n\n decoded_token = jwt.decode(token, options={\"verify_signature\": False})\n\n if 'scope' not in decoded_token:\n decoded_token['scope'] = []\n for required_scope in required_scopes:\n if required_scope not in decoded_token['scope']:\n return jsonify({'message': 'Insufficient Scopes'}), 401\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorator\n\n\n# API endpoints\n@app.route('/devices', methods=['GET'])\n@requires_auth\n@authorize(required_scopes=['devices_list'])\ndef get_devices():\n return json.dumps([device.__dict__ for device in devices.values()], cls=DeviceEncoder), 200, {\n 'content-type': 'application/json'}\n\n\n@app.route('/devices/', methods=['GET'])\n@requires_auth\n@authorize(required_scopes=['devices_list'])\ndef get_device_by_id(device_id):\n device = get_device(device_id)\n if device:\n return json.dumps(device, cls=DeviceEncoder), 200, {'content-type': 'application/json'}\n else:\n return jsonify({'message': 'Device not found'}), 404\n\n\n# Search device by name\n@app.route('/devices/search/', methods=['GET'])\n@requires_auth\n@authorize(required_scopes=['devices_list'])\ndef search_device_by_name(device_name):\n device_list = []\n for device_id, device in devices.items():\n if device_name.lower() in device.name.lower():\n device_list.append(device)\n return json.dumps(device_list, cls=DeviceEncoder), 200, {'content-type': 'application/json'}\n\n\n@app.route('/devices', methods=['POST'])\n@requires_auth\n@authorize(required_scopes=['devices_add'])\ndef add_device():\n device_data = request.get_json()\n # generate a uuid for the device as a string\n device_id = f'{uuid.uuid1()}'\n if 'name' not in device_data or 'image_uri' not in device_data or 'qty' not in device_data or \\\n 'description' not in device_data or 'price' not in device_data:\n return jsonify({'message': 'Missing required fields'}), 400\n promo_id_list = []\n if 'promo_id' in device_data:\n promo_id_list = 
device_data['promo_id']\n new_device = Device(device_id, device_data['name'], device_data['image_uri'], device_data['qty'],\n device_data['description'], device_data['price'], promo_id_list)\n devices[device_id] = new_device\n return jsonify(new_device.__dict__), 201\n\n\n@app.route('/devices/', methods=['PUT', 'PATCH'])\n@requires_auth\n@authorize(required_scopes=['devices_modify'])\ndef update_device(device_id):\n device = get_device(device_id)\n if not device:\n return jsonify({'message': 'Device not found'}), 404\n\n device_data = request.get_json()\n if 'name' in device_data:\n device.name = device_data['name']\n if 'image_uri' in device_data:\n device.image_uri = device_data['image_uri']\n if 'qty' in device_data:\n device.qty = device_data['qty']\n if 'description' in device_data:\n device.description = device_data['description']\n if 'price' in device_data:\n device.price = device_data['price']\n if 'promo_id' in device_data:\n device.promo_id = device_data['promo_id']\n\n devices[device_id] = device\n return jsonify(device.__dict__), 200\n\n\n@app.route('/devices/', methods=['DELETE'])\n@requires_auth\n@authorize(required_scopes=['devices_delete'])\ndef delete_device(device_id):\n # check if device exists\n if device_id in devices:\n devices.pop(device_id)\n return jsonify({'message': f\"Device with ID {device_id} deleted successfully\"}), 204\n else:\n response = make_response(jsonify(message=f\"Device with ID {device_id} not found\"), 404)\n abort(response)\n\n\n@app.route('/promotions', methods=['GET'])\n@requires_auth\n@authorize(required_scopes=['promotions_list'])\ndef get_promotions():\n return json.dumps({\"promotions\": promotions}, cls=PromotionEncoder), 200, {'content-type': 'application/json'}\n\n\n@app.route('/promotions/', methods=['GET'])\n@requires_auth\n@authorize(required_scopes=['promotions_list'])\ndef get_promotion_by_id(promo_id):\n promotion = get_promotion(promo_id)\n if promotion:\n return json.dumps(promotion, cls=PromotionEncoder), 201, {'content-type': 'application/json'}\n else:\n return jsonify({'message': 'Promotion not found'}), 404\n\n\n@app.route('/promotions', methods=['POST'])\n@requires_auth\n@authorize(required_scopes=['promotions_add'])\ndef add_promotion():\n new_promotion = request.get_json()\n new_promotion['promo_id'] = str(uuid.uuid4()) # generate new UUID for promotion ID\n\n if 'promo_code' not in new_promotion or 'discount' not in new_promotion:\n return jsonify({'message': 'Missing required fields'}), 400\n tier_list = []\n if 'tier' in new_promotion:\n tier_list = new_promotion['tier']\n tier_list = Promotion.get_tier_enums(tier_list)\n new_promotion = Promotion(new_promotion['promo_id'], new_promotion['promo_code'], new_promotion['discount'],\n tier_list)\n promotions.append(new_promotion)\n return json.dumps(new_promotion, cls=PromotionEncoder), 201, {'content-type': 'application/json'}\n\n\n@app.route('/promotions/', methods=['PUT', 'PATCH'])\n@requires_auth\n@authorize(required_scopes=['promotions_modify'])\ndef update_promotion(promo_id):\n promotion = get_promotion(promo_id)\n if not promotion:\n return jsonify({'message': 'Promotion not found'}), 404\n\n promotion_data = request.get_json()\n if 'promo_code' in promotion_data:\n promotion.promo_code = promotion_data['promo_code']\n if 'discount' in promotion_data:\n promotion.discount = promotion_data['discount']\n if 'tier' in promotion_data:\n promotion.tier_list = Promotion.get_tier_enums(promotion_data['tier'])\n\n return json.dumps(promotion, cls=PromotionEncoder), 201, 
{'content-type': 'application/json'}\n\n\n@app.route('/promotions/', methods=['DELETE'])\n@requires_auth\n@authorize(required_scopes=['promotions_delete'])\ndef delete_promotion(promo_id):\n promotion = [p for p in promotions if p.promo_id == promo_id]\n if len(promotion) == 0:\n abort(404, f\"Promotion with ID {promo_id} not found\")\n\n promotions.remove(promotion[0])\n return jsonify({'message': f\"Promotion with ID {promo_id} deleted successfully\"})\n\n\n# Add promotion to device\n@app.route('/promotions/devices', methods=['POST'])\n@requires_auth\n@authorize(required_scopes=['promotions_modify'])\ndef add_promotion_to_device():\n # \"promo_id\": \"1\",\n # \"device_ids\": [\n # 1,2\n # ]\n # Sample request body\n promotion_data = request.get_json()\n if 'promo_id' not in promotion_data or 'device_ids' not in promotion_data:\n abort(400, 'Missing promo_code or device_ids from the request body')\n promo_id = promotion_data['promo_id']\n device_ids = promotion_data['device_ids']\n promotion = get_promotion_by_id(promo_id)\n if not promotion:\n abort(404, f\"Promotion with ID {promo_id} not found\")\n for device_id in device_ids:\n device = get_device(device_id)\n if not device:\n abort(404, f\"Device with ID {device_id} not found\")\n device.promo_id_list.append(promo_id)\n\n return jsonify({'message': f\"Promotion with ID {promo_id} added to devices successfully\"})\n\n\n# Define a Flask endpoint that requires JWT access token\n@app.route('/sales_trends')\n@requires_auth\n@authorize(required_scopes=['sales_trends_view'])\ndef sales_activity():\n return 'Access granted!'\n\n\n@app.route('/customers', methods=['GET'])\n@requires_auth\n@authorize(required_scopes=['customers_list'])\ndef get_customers():\n return json.dumps([customer.__dict__ for customer in customers.values()]), 200, {'content-type': 'application/json'}\n\n\n # - firstName\n # - lastName\n # - tier\n # - country\n@app.route('/customers', methods=['POST'])\n@requires_auth\n@authorize(required_scopes=['customers_add'])\ndef add_customer():\n customer_data = request.get_json()\n if 'firstName' not in customer_data or 'lastName' not in customer_data or 'country' not in customer_data or 'email' not in customer_data:\n return jsonify({'message': 'Missing required fields'}), 400\n \n access_token = get_token(\"internal_user_mgt_create internal_group_mgt_update\")\n try:\n create_customer(access_token, customer_data['email'], customer_data['firstName'], customer_data['lastName'])\n except:\n return jsonify({'message': 'Error creating customer'}), 500\n\n return jsonify({'message': 'Customer created successfully.'}), 201\n\n\n@app.route('/customers/', methods=['PATCH'])\n@requires_auth\n@authorize(required_scopes=['customers_modify'])\ndef update_customer(customer_id):\n customer = get_customer(customer_id)\n if not customer:\n return jsonify({'message': 'Customer not found'}), 404\n\n # device_data = request.get_json()\n # if 'name' in device_data:\n # device.name = device_data['name']\n # if 'image_uri' in device_data:\n # device.image_uri = device_data['image_uri']\n # if 'qty' in device_data:\n # device.qty = device_data['qty']\n # if 'description' in device_data:\n # device.description = device_data['description']\n # if 'price' in device_data:\n # device.price = device_data['price']\n # if 'promo_id' in device_data:\n # device.promo_id = device_data['promo_id']\n\n return jsonify({'device': customer.__dict__}), 200\n\n\n@app.route('/update/', methods=['PATCH'])\n@requires_auth\n@authorize(required_scopes=[])\ndef 
update_user(user_id):\n access_token = get_token(\"internal_user_mgt_update\")\n profile_data = request.get_json()\n full_name = \"\"\n if 'full_name' in profile_data:\n full_name = profile_data['full_name']\n if not full_name:\n return jsonify({'message': 'Full name not found'}), 400\n\n update_user(user_id, access_token, full_name)\n\n # return jsonify({'device': customer.__dict__}), 200\n return jsonify({'message': 'Updated'}), 200\n\n\ndef get__new_access_token(scopes):\n # Define the request URL\n url = f\"https://api.asgardeo.io/t/{ORG_NAME}/oauth2/token\"\n\n # Define the request headers\n headers = {\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Authorization\": f\"Basic {base64.b64encode(f'{ADMIN_CLIENT_ID}:{ADMIN_CLIENT_SECRET}'.encode()).decode()}\"\n }\n\n # Define the request body\n data = {\n \"grant_type\": \"client_credentials\",\n \"scope\": f\"{scopes}\"\n }\n\n # Send the request and retrieve the response\n response = requests.post(url, headers=headers, data=data)\n\n # Check if the response was successful\n if response.status_code == 200:\n # Retrieve the access token from the response body\n access_token = response.json()[\"access_token\"]\n\n # Return the access token\n ACCESS_TOKEN[scopes] = access_token\n else:\n # Raise an exception if the response was not successful\n response.raise_for_status()\n\n\ndef get_token(scopes):\n if scopes in ACCESS_TOKEN:\n decoded_token = jwt.decode(ACCESS_TOKEN[scopes], options={\"verify_signature\": False})\n if decoded_token['exp'] < time.time():\n get__new_access_token(scopes)\n return ACCESS_TOKEN[scopes]\n else:\n get__new_access_token(scopes)\n return ACCESS_TOKEN[scopes]\n\n\ndef update_user(user_id, token, full_name):\n url = f\"https://api.asgardeo.io/t/{ORG_NAME}/scim2/Users/{user_id}\"\n headers = {\n \"Authorization\": f\"Bearer {token}\",\n \"Content-Type\": \"application/json\"\n }\n data = {\n \"Operations\": [\n {\n \"op\": \"replace\",\n \"value\": {\n \"name\": {\n \"formatted\": full_name\n }\n }\n }\n\n ],\n \"schemas\": [\n \"urn:ietf:params:scim:api:messages:2.0:PatchOp\"\n ]\n }\n response = requests.patch(url, headers=headers, json=data)\n if response.ok:\n return response.json()\n else:\n response.raise_for_status()\n\n\ndef create_customer(token, email, first_name, last_name):\n url = f\"https://api.asgardeo.io/t/{ORG_NAME}/scim2/Users\"\n headers = {\n \"Authorization\": f\"Bearer {token}\",\n \"Content-Type\": \"application/json\"\n }\n payload = {\n \"userName\": f\"DEFAULT/{email}\",\n \"name\": {\n \"familyName\": f\"{first_name}\",\n \"givenName\": f\"{last_name}\"\n },\n \"emails\": [\n {\n \"primary\": True,\n \"value\": f\"{email}\"\n }\n ],\n \"urn:scim:wso2:schema\": {\n \"askPassword\": \"true\"\n }\n }\n\n response = requests.post(url, headers=headers, data=json.dumps(payload))\n\n if response.status_code == 201:\n print(\"User created successfully!\")\n response_json = response.json()\n user_id = response_json['id']\n username = response_json['userName']\n add_user_to_group(token, username, user_id)\n else:\n print(\"User creation failed.\")\n print(response.status_code, response.content)\n response.raise_for_status()\n\n\ndef add_user_to_group(token, username, user_id):\n url = f\"https://api.asgardeo.io/t/{ORG_NAME}/scim2/Groups/{CUSTOMER_GROUP_ID}\" # Replace {group_id} with the actual group ID\n headers = {\n 'Content-Type': 'application/json',\n 'Authorization': f'Bearer {token}' # Replace {access_token} with the actual access token\n }\n\n payload = {\n \"Operations\": 
[\n {\n \"op\": \"add\",\n \"value\": {\n \"members\": [\n {\"display\": f\"{username}\", \"value\": f\"{user_id}\"}\n ]\n }\n }\n ],\n \"schemas\": [\"urn:ietf:params:scim:api:messages:2.0:PatchOp\"]\n }\n\n response = requests.patch(url, json=payload, headers=headers)\n if response.ok:\n return response.json()\n else:\n response.raise_for_status()\n\nif __name__ == '__main__':\n app.run(port=3000)\n","repo_name":"Achintha444/kfone_admin_app_flutter","sub_path":"kfone_admin_backend/hellofly.py","file_name":"hellofly.py","file_ext":"py","file_size_in_byte":19681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33372305291","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom django.db.models import Count\nfrom django.core.paginator import Paginator\n\nfrom .models import Post, Category, Comment\nfrom .forms import RegisterForm, CommentForm\n\n# Create your views here.\ndef categories_base(request):\n return {\n 'categories_base': Category.objects.all()[:5],\n }\n\ndef category(request, pk):\n category = get_object_or_404(Category, pk=pk)\n category_list = Post.objects.filter(category=category)\n context = {\n 'category': category,\n 'category_list': category_list,\n }\n return render(request, 'category.html', context)\n\ndef categories(request):\n categories = Category.objects.all()\n context = {\n 'categories': categories,\n }\n return render(request, 'categories.html', context)\n\ndef user_login(request):\n if request.method == 'POST':\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('/')\n else:\n messages.error(request, 'contraseña o nombre de usuario incorrecto')\n return redirect('user_login')\n else:\n pass\n return render(request, 'autho/user_login.html')\n\ndef user_logout(request):\n logout(request)\n return redirect('/')\n\ndef user_register(request):\n if request.method == 'POST':\n form = RegisterForm(request.POST) \n if form.is_valid():\n email = form.cleaned_data.get('email')\n if User.objects.filter(email=email).exists():\n messages.error(request, 'Este correo electrónico ya está registrado.')\n return redirect('user_register')\n else:\n user = form.save(commit=False)\n user.username = user.username.lower()\n user.save()\n messages.success(request, 'Te has registrado correctamente.')\n login(request, user)\n return redirect('/')\n else:\n form = RegisterForm() \n context = {\n 'form': form,\n }\n return render(request, 'autho/user_register.html', context)\n\ndef home(request):\n posts = Post.objects.all()[:12]\n context = {\n 'posts': posts,\n }\n return render(request, 'home.html', context)\n\ndef all_posts(request):\n posts_list = Post.objects.all()\n paginator = Paginator(posts_list, 24) # Show 25 contacts per page.\n\n page_number = request.GET.get(\"page\")\n posts = paginator.get_page(page_number)\n\n context = {\n 'posts': posts,\n }\n return render(request, 'all_post.html', context)\n\ndef search(request):\n searched = request.GET.get('searched', '')\n results_list = Post.objects.filter(title__icontains=searched)\n paginator = Paginator(results_list, 1) # Show 1 post per page.\n\n page_number = request.GET.get(\"page\")\n results = paginator.get_page(page_number)\n context = {\n 
'results': results,\n 'searched': searched,\n }\n return render(request, 'search.html', context)\n\ndef details(request, pk):\n post = get_object_or_404(Post, pk=pk)\n categories_with_counts = Category.objects.annotate(post_count=Count('product'))[:10]\n recent_posts = Post.objects.all()[:5]\n\n comments = post.comments.filter(parent=None)\n if request.method == 'POST':\n comment_form = CommentForm(data=request.POST)\n if comment_form.is_valid():\n new_comment = comment_form.save(commit=False)\n new_comment.post = post\n new_comment.author = request.user # assuming you have user logged in\n parent_id = request.POST.get('parent_id')\n if parent_id:\n parent_comment = Comment.objects.get(id=parent_id)\n new_comment.parent = parent_comment\n new_comment.save()\n return redirect('details', pk=post.pk)\n else:\n comment_form = CommentForm()\n\n context = {\n 'post': post,\n 'categories_with_counts': categories_with_counts,\n 'recent_posts': recent_posts,\n 'comments': comments,\n 'comment_form': comment_form,\n }\n return render(request, 'details.html', context)\n\n","repo_name":"SebastianMou/Josue_Thesis_blog_project","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38205638199","text":"from GompEErtz import *\n\n\nnmin = 2\nsavefig = False\n\n#---------------------------------------------------------------\n# Calcular los ajustes dependiendo el porcentaje de sospechosos\n\n#positivos_only\ngom = GompEErtz(data_type='M',lugar='Chihuahua',nmin=nmin)\n\n#positivos + 40% sospeshosos\ngom_40sos = GompEErtz(data_type='M',lugar='Chihuahua',nmin=nmin,factor=0.4)\n\n#positivos + sospeshosos\ngom_100sos = GompEErtz(data_type='M',lugar='Chihuahua',nmin=nmin,factor=1)\n\n#-------\n#Plotting\n\nplt.ion()\nplt.figure()\n#plt.plot(gom.cases_daily,'+',color='lightcoral',label='Chihuahua Positivos')\nplt.bar(range(len(gom.cases_daily)),gom.cases_daily,color='lightcoral',label='Confirmados')\n\nplt.plot(gom.mfit_day,'k-',label='Gompertz Ajuste a Confirmados (G)')\nplt.plot(gom.mfit_pronostico_day,'-',color='red',label='Gompertz Pronostico a Confirmados (GP)')\nplt.plot(gom.mfit_day,'k-',label='')\n\n\nplt.plot(gom_40sos.mfit_pronostico_day,'--',color='firebrick',label='GP + 40% de Sospechosos')\nplt.plot(gom_100sos.mfit_pronostico_day,'-.',color='peru',label='GP + 100% de Sospechosos')\n\n#plt.vlines(x=[0,len(gom.mfit_day)],ymin=-10,ymax=100, color = 'Gray')\n\ntext(0, gom.cases_daily.max()/2,gom.dias[0], rotation=90, verticalalignment='top')\ntext(len(gom.mfit_day), gom.cases_daily.max()/2,gom.dias[-1], rotation=90, verticalalignment='top')\n#plt.xticks([0,len(gom.mfit_day)], [gom.dias[0],gom.dias[-1]], rotation='vertical')\n\nplt.vlines(len(gom.dias),-5,np.nanmax(gom_40sos.cases_daily)*1.1,colors='Gray',label=gom.dias[-1])\nplt.ylim(0,np.nanmax(gom_40sos.cases_daily)*1.05)\n\nplt.hlines(nmin,-1*nmin,200,linestyles='dotted',colors='Gray', label='Limite %i casos'%nmin)\nplt.xlim(-2,165)\n\nplt.legend()\nplt.xlabel('Dias desde %s '%gom.dia_init)\nplt.ylabel('Casos Diarios')\nplt.title('Casos diarios Mpio. 
Chih')\nif savefig:\n plt.savefig('../results/gom_pronostico_Mpio_%s.png'%gom.dated,dpi=300)\n\n#---------------------------------------------------------------\n# Calcular el cambio de pronostico devido a actualizacion de datos.\n\n\n#positivos + 40% sospeshosos\ngom_40sos_0606 = GompEErtz(data_type='M',lugar='Chihuahua',nmin=nmin,factor=0.4,dated='20200606')\ngom_40sos_0614 = GompEErtz(data_type='M',lugar='Chihuahua',nmin=nmin,factor=0.4,dated='20200615')\ngom_40sos_0621 = GompEErtz(data_type='M',lugar='Chihuahua',nmin=nmin,factor=0.4,dated='20200621')\ngom_40sos_0626 = GompEErtz(data_type='M',lugar='Chihuahua',nmin=nmin,factor=0.4,dated='20200626')\ngom_40sos_0707 = GompEErtz(data_type='M',lugar='Chihuahua',nmin=nmin,factor=0.4,dated='20200707')\ngom_40sos_0715 = GompEErtz(data_type='M',lugar='Chihuahua',nmin=nmin,factor=0.4,dated='20200715')\n\n#-------\n#Plotting\n\nplt.ion()\nplt.figure()\n\nplt.plot(gom_40sos_0606.mfit_pronostico_day,'-.',color='gold',label='GP+40 ; Junio 06')\nplt.plot(gom_40sos_0614.mfit_pronostico_day,'-.',color='orange',label='GP+40 ; Junio 15')\nplt.plot(gom_40sos_0621.mfit_pronostico_day,'--',color='darkorange',label='GP+40 ; Junio 21')\nplt.plot(gom_40sos_0626.mfit_pronostico_day,'--',color='peru',label='GP+40 ; Junio 26')\nplt.plot(gom_40sos_0707.mfit_pronostico_day,'--',color='firebrick',label='GP+40 ; Julio 07')\nplt.plot(gom_40sos_0715.mfit_pronostico_day,'-.',color='red',label='GP+40 ; Julio 15')\n\nplt.vlines(len(gom.dias),-5,np.nanmax(gom_40sos.mfit_pronostico_day)*1.1,colors='Gray',label=gom.dias[-1])\nplt.ylim(-0.5,np.nanmax(gom_40sos.mfit_pronostico_day)*1.05)\n\nplt.hlines(nmin,-1*nmin,200,linestyles='dotted',colors='Gray', label='Limite %i casos'%nmin)\nplt.xlim(-2,165)\n\nplt.legend()\nplt.xlabel('Dias desde %s '%gom_40sos_0606.dia_init)\nplt.ylabel('Casos Diarios')\nplt.title('Casos diarios Mpio. Chihuahua')\nif savefig:\n plt.savefig('../results/gom_pronostico_Mpio_multi_%s.png'%gom.dated,dpi=300)\n\n#---------------------------------------------------------------\n# Calcular el cambio de pronostico con respecto al limites iniciales\n\n#positivos + 40% sospeshosos\ngom_40sos_nmin1 = GompEErtz(data_type='M',lugar='Chihuahua',nmin=1,factor=0.4)\ngom_40sos_nmin2 = GompEErtz(data_type='M',lugar='Chihuahua',nmin=2,factor=0.4)\ngom_40sos_nmin5 = GompEErtz(data_type='M',lugar='Chihuahua',nmin=5,factor=0.4)\n\n#-------\n#Plotting\n\n#Issue : start day\n\nplt.ion()\nplt.figure()\n\nplt.bar(range(len(gom_40sos_nmin1.cases_daily)),gom_40sos_nmin1.cases_daily,color='lightcoral',label='Chihuahua Confirmados')\n\nplt.plot(gom_40sos_nmin1.mfit_pronostico_day,'--',color='firebrick',label='GP+40 ; lim 1 caso')\nplt.plot(list(np.ones(18)*np.NaN)+list(gom_40sos_nmin2.mfit_pronostico_day),'--',color='peru',label='GP+40 ; lim 2 casos')\nplt.plot(list(np.ones(33)*np.NaN)+list(gom_40sos_nmin5.mfit_pronostico_day),'--',color='olive',label='GP+40 ; lim 5 casos')\n\nnmin=1\nplt.hlines(nmin,-1*nmin,200,linestyles='dotted',colors='Gray', label='Limite %i casos'%nmin)\nnmin=5\nplt.hlines(nmin,-1*nmin,200,linestyles='dotted',colors='k', label='Limite %i casos'%nmin)\nplt.xlim(-2,165)\n\nplt.vlines(len(gom.dias),-5,np.nanmax(gom_40sos.cases_daily)*1.1,colors='Gray',label=gom.dias[-1])\nplt.ylim(0,np.nanmax(gom_40sos.cases_daily)*1.05)\n\nplt.legend()\nplt.xlabel('Dias desde %s '%gom_40sos_nmin1.dia_init)\nplt.ylabel('Casos Diarios')\nplt.title('Casos diarios Mpio. 
Chihuahua')\nif savefig:\n plt.savefig('../results/gom_pronostico_Mpio_multilim_%s.png'%gom.dated,dpi=300)\n\n#-------\n#Plotting\n\ngom_40sos.plot_tot_fit()\nif savefig:\n plt.savefig('../results/gom_pronostico_Mpio_fit_%s.png'%gom.dated,dpi=300)\n\n","repo_name":"jeenriquez/GompEErtz","sub_path":"GompEErtz/GompEErtz_Mpio_Chih.py","file_name":"GompEErtz_Mpio_Chih.py","file_ext":"py","file_size_in_byte":5406,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37285394488","text":"#\n# @lc app=leetcode id=424 lang=python3\n#\n# [424] Longest Repeating Character Replacement\n#\n# https://leetcode.com/problems/longest-repeating-character-replacement/description/\n#\n# algorithms\n# Medium (44.30%)\n# Likes: 547\n# Dislikes: 48\n# Total Accepted: 32.8K\n# Total Submissions: 73.9K\n# Testcase Example: '\"ABAB\"\\n2'\n#\n# Given a string that consists of only uppercase English letters, you can\n# replace any letter in the string with another letter at most k times. Find\n# the length of a longest substring containing all repeating letters you can\n# get after performing the above operations.\n# \n# Note:\n# Both the string's length and k will not exceed 10^4.\n# \n# \n# \n# Example 1:\n# \n# Input:\n# s = \"ABAB\", k = 2\n# \n# Output:\n# 4\n# \n# Explanation:\n# Replace the two 'A's with two 'B's or vice versa.\n# \n# \n# \n# \n# Example 2:\n# \n# Input:\n# s = \"AABABBA\", k = 1\n# \n# Output:\n# 4\n# \n# Explanation:\n# Replace the one 'A' in the middle with 'B' and form \"AABBBBA\".\n# The substring \"BBBB\" has the longest repeating letters, which is 4.\n# \n# \n#\nfrom collections import defaultdict\n\nclass Solution:\n def characterReplacement(self, s: str, k: int) -> int:\n\n count = defaultdict(int)\n maxn = i = j = 0\n for i, v in enumerate(s, 1):\n count[v] += 1\n maxn = max(maxn, count[v])\n if i - j - maxn > k:\n count[s[j]] -= 1\n j += 1\n\n return i - j\n\n \n\n","repo_name":"chenxu0602/LeetCode","sub_path":"424.longest-repeating-character-replacement.py","file_name":"424.longest-repeating-character-replacement.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"4319850685","text":"import numpy as np\r\nimport cv2\r\n\r\npixel_size = 256\r\nrandom_point = 2\r\n\r\n\r\ndef makepoint(way=2):\r\n if way == 1:\r\n \"\"\"way 1\"\"\"\r\n radi = 60\r\n pixel_center = int(pixel_size / 2)\r\n high1 = pixel_center + np.random.randint(-radi, radi, size=random_point)\r\n high2 = pixel_center + np.random.randint(-radi, radi, size=random_point)\r\n elif way == 2:\r\n \"\"\"way 2\"\"\"\r\n pointmin, pointmax = 100, 156\r\n high1 = np.random.randint(pointmin, pointmax, size=random_point)\r\n high2 = np.random.randint(pointmin, pointmax, size=random_point)\r\n else:\r\n high1 = np.linspace(0, 255, 1, dtype='int')\r\n high2 = high1\r\n print(high1, '\\n', high2)\r\n return high1, high2\r\n\r\n\r\ndef logplt(im0, getlog=1, windowname='unname'):\r\n im0 = np.abs(im0)\r\n if getlog == 1:\r\n im0plt = np.log(1 + im0) / np.log(2)\r\n else:\r\n im0plt = im0\r\n cv2.imshow('{}'.format(windowname), im0plt)\r\n\r\n\r\ndef realpic(im0):\r\n im1 = np.fft.fft2(im0) # np.fft.ifftshift(im0)\r\n im2 = np.fft.fftshift(im1) # np.fft.ifft2(im1)\r\n im3 = np.abs(im2)\r\n im3 = im2 / np.max(im3)\r\n im4 = np.arctan(np.imag(im2) / np.real(im2))\r\n im4 = np.nan_to_num(im4)\r\n # im4 = (im4 + np.pi / 2) / np.pi\r\n im4 = abs(im4) * 2 / np.pi\r\n 
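# debug output: maximum of the normalized phase map (arctan scaled by 2/pi, so values should lie in [0, 1])\r\n 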
print(im4.max())\r\n return im3, im4\r\n\r\n\r\ndef white(im0, expand=0):\r\n high1, high2 = makepoint()\r\n if expand == 0:\r\n for i in range(len(high1)):\r\n im0[high1[i], high2[i]] = 1.0\r\n else:\r\n for i in range(len(high1)):\r\n im0[high1[i] - expand:high1[i] + expand, high2[i] - expand:high2[i] + expand] = 1.0\r\n return im0\r\n\r\n\r\ndef show_pic(img):\r\n logplt(img, 1, 'fourie')\r\n rimg = realpic(img)\r\n logplt(rimg[0], 0, 'real_abs')\r\n logplt(rimg[1], 0, 'real_phase')\r\n cv2.waitKey(0)\r\n\r\n\r\ndef main():\r\n img = np.zeros([pixel_size, pixel_size], dtype='float')\r\n img = white(img)\r\n show_pic(img)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"YuLingFengSCNU2017/MoireFitting","sub_path":"MoireTest/CreateMoireInPhase.py","file_name":"CreateMoireInPhase.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"34082817679","text":"from _1gc import Character\n\n\nclass Monster(Character):\n _total_count = 0\n\n def __init__(self):\n super().__init__()\n Monster._total_count += 1\n\n\nclass FireMonster(Monster):\n _total_count = 0\n\n def __init__(self):\n super().__init__()\n FireMonster._total_count += 1\n print(\"['%-17s'] object is created...\"% self.__class__.__name__)\n\n\nclass IceMonster(Monster):\n\n def __init__(self):\n super().__init__()\n IceMonster._total_count += 1\n print(\"['%-17s'] object is created...\"% self.__class__.__name__)\n\n\n\n\nif __name__ == '__main__':\n a = FireMonster()\n b = IceMonster()\n\n print(\"Number of Characters =\", Character._total_count)\n print(\"Number of Monsters =\", Monster._total_count)\n","repo_name":"onito/hello_python","sub_path":"_static/module_custom/_2gm.py","file_name":"_2gm.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39820189733","text":"######################################################################################\n# Program Name: wall_bouncer.py\n# Written by: Will Ward\n#\n# Control program for Raspberry Pi powered roomba-like robot.\n#\n# Program Flow:\n# 1. Enable the motors. Confirm the motors are enabled (check GPIO pins voltage level). \n# 2. Robot enters **Pausing** mode. Set the GREEN LED to be dimmer using PWM. Make sure\n# robot's motion is stopped in this mode.\n# 3. Press the button to enable the **Playing** mode, and the robot moves forward.\n# Green LED stays lit to indicate **Playing** mode. Use two distance sensors to \n# check for obstacles. Turn the robot away from obstacles if it gets too close.\n# 4. Press the button again to switch the back to **Pausing** mode. \n# 5. Pressing the button switches the mode back and forth. \n# 6. Record time consumption in **Playing** mode. After 60 seconds of play, yellow LED \n# turns on. 
After 90 seconds, red blinks 10 times, then the robot shuts down.\n# \n########################################################################################\n\n\n# Import python packages\nimport time\nfrom gpiozero import LED, PWMLED, Button, DistanceSensor, PhaseEnableRobot\n\n\n##################\n# Preperations \n##################\n\n# Initialize GPIO pins, LEDs, button, distance sensor and the robot \n\nmotor_pin_1 = LED(22) # motor 1 enable pin set to GPIO 22\nmotor_pin_2 = LED(23) # motor 2 enable pin set to GPIO 23\n\nred = PWMLED(10, frequency = 2) # red LED set to GPIO 10\ngreen = PWMLED(19, frequency = 1000) # green LED set to GPIO 19\nyellow = LED(8) # yellow LED set to GPIO 8 (no PWM)\n\nbutton = Button(27, hold_time=3) # Button 1 set to GPIO 27\nsensor1 = DistanceSensor(echo=16, trigger=17, max_distance = 4) # initialize ultrasonic sensor 1\nsensor2 = DistanceSensor(echo=18, trigger=14, max_distance = 4) # max distance in meters\n\nrobot = PhaseEnableRobot(left=(24, 12), right=(25,13)) # set GPIO pins for (direction, PWM) on each motor\n\n\n# Initiate variables such as mode, duty cycles, run time\n\nstate = False # play mode = True, pause mode = False\nrun_time = 0.0 # keeps track of total time in play mode\nred_time = 0.0 # keeps track of total time red is blinking\nduty_cycle = list(range(0, 101)) + list(range(100, -1, -1)) # duty cycle for green goes from 0 -> 101 -> -1\ni = 0 # used to increment duty_cycle\n\n\n# Enable the motors and confirm they are on\n\nmotor_pin_1.on() # enable motor1\nmotor_pin_2.on() # enable motor2\n\nif (motor_pin_1.is_active): # check motor1\n print(\"Motor 1 is on\")\nelse:\n print(\"Error - Motor 1 is off\")\n\nif (motor_pin_2.is_active): # check motor2\n print(\"Motor 2 is on\")\nelse:\n print(\"Error - Motor 2 is off\")\n\ntime.sleep(0.1) # pause for 0.1 seconds\n\n\n\n\n\n###########\n# Main\n###########\n\ntry: # loop continuously until ctrl-c is pressed\n while True:\n if button.is_pressed: # if button pressed, switch mode\n print(\"Play/Pause pressed\")\n button.wait_for_release() # waits for you to release button\n print(\"Play/Pause released\")\n state = not state # change mode\n\n \n if state: # if in Playing mode\n green.value = 1 # light up GREEN (full brightness)\n robot.forward(0.7) # move robot forward (fraction of full power)\n \n run_time += 0.02 # update run_time\n print(\"ON \")\n print(run_time)\n \n if run_time > 60:\n yellow.on() # light up YELLOW if play mode for 1 minute\n if run_time > 90:\n red.value = 0.5 # light up RED if play mode for more than 90 seconds\n red_time += 0.02 # increment red on time\n if red_time > 10: # if RED on mroe than 10 seconds, break\n break\n \n if sensor1.distance < 0.6 or sensor2.distance < 0.6: # if distance between robot and obstacle < 0.6\n robot.left(0.7) # turn robot to the left\n \n time.sleep(.02) # pause for 0.02 seconds\n\n\n else: # otherwise, robot is in pause mode \n green.value = duty_cycle[i] / 100 # change GREEN's duty cycle\n i += 1\n if i >= len(duty_cycle):\n i = 0\n\n robot.stop() # turn robot off\n print(\"OFF \")\n #print(duty_cycle[i])\n time.sleep(.02)\n \nexcept KeyboardInterrupt:\n # Turn off all LEDs, the robot, and the motors\n red.off()\n yellow.off()\n green.off()\n robot.stop()\n motor_pin_1.off()\n motor_pin_2.off()\n print(\"\\nLEDs and Motors are turned off.\")\n\nfinally:\n # Ensure that all LEDS, the robot, and the motors are turned off\n red.off()\n yellow.off()\n green.off()\n robot.stop()\n motor_pin_1.off()\n motor_pin_2.off()\n print(\"\\nLEDs and 
Motors are turned off.\")\n","repo_name":"willward20/Robotics-1","sub_path":"Project-1-Wall-Bouncer/wall_bouncer.py","file_name":"wall_bouncer.py","file_ext":"py","file_size_in_byte":5034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30736134880","text":"from pymongo import MongoClient\nfrom src.config import DBURL\nfrom src.errorHandling import APIError, errorHandler\nfrom bson.json_util import dumps\nfrom bson.objectid import ObjectId\nfrom flask import Flask\nfrom src.app import app\n\n\n\nclient = MongoClient(DBURL)\ndb = client.get_database()\n\n\nuser_collec=db[\"users\"]\nchat_collec=db[\"chats\"]\nmess_collec=db[\"messages\"]\n\n\n@app.route(\"/\")\n@errorHandler\ndef hello():\n return f\"Welcome to my api\"\n\n\n@app.route(\"/user/create/\")\n@errorHandler\ndef newUser(username):\n users=user_collec.distinct(\"username\")\n if username in users:\n raise APIError (\"User already exists\")\n else:\n user={\"username\":username}\n user_collec.insert_one(user)\n res=user_collec.find_one({\"username\":username},{\"username\":1})\n return dumps(res)\n\n\n@app.route(\"/chat/create/\")\n@errorHandler\ndef newChat(chatname):\n chats=chat_collec.distinct(\"chat_name\")\n if chatname in chats:\n raise APIError (\"Chat name already exists, please insert another one\")\n else:\n infochat={\"chat_name\":chatname}\n chat_collec.insert_one(infochat)\n res=chat_collec.find_one({\"chat_name\":chatname},{\"_id\":1,\"chat_name\":1})\n return dumps(res)\n\n\n@app.route(\"/chat//adduser/\")\n@errorHandler\ndef addUser(chatname,user):\n chat_id=chat_collec.find_one({\"chat_name\":chatname},{\"_id\":1})\n if len(chat_id)==0:\n raise APIError (\"Chat doesn't exist, please check your spelling\")\n else:\n user_id=user_collec.find_one({\"username\":user},{\"_id\":1})\n chat_collec.update({ \"_id\":chat_id[\"_id\"]},{ \"$push\":{ \"participants\":user_id[\"_id\"]}})\n res=chat_collec.find_one({\"chat_name\":chatname},{\"participants\":1})\n users=[user_collec.find_one({\"_id\":part},{\"_id\":0,\"username\":1})[\"username\"] for part in res[\"participants\"]]\n dic={\"chat_name\":chatname,\"participants\":users}\n return dumps(dic)\n \n\n\n@app.route(\"/chat//user//addmessage/\")\n@errorHandler\ndef newMessage(chatname,username,message):\n chat_id=chat_collec.find_one({\"chat_name\":chatname},{\"_id\":1})\n if len(chat_id)==0:\n raise APIError (\"Chat doesn't exist, please check your spelling\")\n user_id=user_collec.find_one({\"username\":username},{\"_id\":1})\n if user_id==0:\n raise APIError (\"Username doesn't exist, please check your spelling\")\n else:\n message_info={\"user\":user_id[\"_id\"],\"username\":username, \"chat\":chat_id[\"_id\"],\"chat_name\": chatname, \"message\":message}\n mess_collec.insert_one(message_info)\n res=mess_collec.find_one({\"message\":message},{\"_id\":0})\n dic={\"chat_name\":chatname,\"username\":username,\"message\":res[\"message\"]}\n return dumps(dic)\n\n\n@app.route(\"/chat//list\")\n@errorHandler\ndef getMessages(chatname):\n res=mess_collec.find({\"chat_name\":chatname},{\"chat_name\":1,\"message\":1})\n lista=list(res)\n a=[lista[i][\"message\"] for i in range(len(lista))]\n dic={\"chat_name\":chatname,\"message\":a}\n return dumps(dic)\n ","repo_name":"sofiavpuebla/4.-Creating-an-API","sub_path":"src/creating.py","file_name":"creating.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} 
+{"seq_id":"72995484326","text":"from pysword.modules import SwordModules\nfrom pysword.bible import SwordBible\n\n\n\nclass Bibles:\n bible_collection = None\n sword = None\n def __init__(self, bibles_path):\n self.sword = SwordModules(bibles_path)\n self.bible_collection = self.sword.parse_modules()\n\n def get_versions(self):\n version_info = {}\n for version in self.bible_collection:\n version_info[version] = self.bible_collection[version]['description']\n return version_info\n\n def get_books(self,version):\n books = self.sword.get_bible_from_module(version).get_structure().get_books()\n book_dict = {}\n for book in books['ot']:\n book_dict[book.osis_name] = { \"name\": book.name }\n book_dict[book.osis_name][\"chapters\"] = dict(zip(list(range(1,book.num_chapters+1)),book.chapter_lengths))\n for book in books['nt']:\n book_dict[book.osis_name] = { \"name\": book.name}\n book_dict[book.osis_name][\"chapters\"] = dict(zip(list(range(1,book.num_chapters+1)),book.chapter_lengths))\n return book_dict\n\n def acceptable_book(self,version):\n abn = []\n books = self.sword.get_bible_from_module(version).get_structure().get_books()\n for book in books['ot']:\n abn.push(book.osis_name)\n abn.push(book.name)\n for book in books['nt']:\n abn.push(book.osis_name)\n abn.push(book.name)\n return abn\n \n\n def get_text(self, version, books=None, chapters=None, verses=None):\n if version in self.get_versions().keys():\n version = version\n else:\n version = 'DRC'\n if verses is not None:\n verses=list(range(verses[0],verses[1]+1))\n print(verses)\n return_dict = {\"version\": version}\n return_dict[\"books\"] = books\n return_dict[\"chapters\"] = chapters\n text = {}\n if type(books) is list:\n for book in books:\n for chapter in chapters:\n text[chapter] = list(self.sword.get_bible_from_module(version).get_iter(book,chapter,verses))\n if verses is None:\n addedv = list(range(1,len(rtxt)+1))\n else:\n addedv = verses\n text[chapter] = dict(zip(addedv,rtxt))\n return_dict[\"text\"][book] = text\n elif type(chapters) is list:\n for chapter in chapters:\n rtxt = list(self.sword.get_bible_from_module(version).get_iter(books,chapter,verses))\n if verses == None:\n addedv = list(range(1,len(rtxt)+1))\n else:\n addedv = verses\n text[chapter] = dict(zip(addedv,rtxt))\n return_dict[\"text\"] = {}\n return_dict[\"text\"][books] = text\n return return_dict\n ","repo_name":"davidmon21/able","sub_path":"able_back/bibles.py","file_name":"bibles.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25018144624","text":"from rest_framework import generics\nfrom rest_framework.response import Response\nfrom rest_framework.views import status\n\nfrom .models import Process, Assets, Groups, DataClassification, DataMaps, DataItems, DataSubject, SubjectSource\nfrom .serializers import AssetSerializer, ProcessSerializer, GroupSerializer, DataClassificationSerializer, \\\n DataMapSerializer, UserSerializer, DataItemsSerializer, DataSubjectsSerializer, LoginSerializer, \\\n SubjectSourceSerializer\n\n\nclass RegistrationAPIView(generics.CreateAPIView):\n \"\"\"Register new user\"\"\"\n serializer_class = UserSerializer\n\n def post(self, request):\n request.data['username'] = request.data['username'].lower()\n user = request.data\n\n serializer = self.serializer_class(\n data=user, context={'request': request})\n serializer.is_valid(raise_exception=True)\n serializer.save()\n success_message = {\n \"success\": \"User was 
successfully registered\",\n            \"data\": serializer.data\n        }\n        return Response(success_message, status=status.HTTP_201_CREATED)\n\n\nclass LoginAPIView(generics.CreateAPIView):\n    \"\"\"Login a registered user\"\"\"\n    serializer_class = LoginSerializer\n\n    def post(self, request):\n        user = request.data\n        serializer = self.serializer_class(data=user)\n        serializer.is_valid(raise_exception=True)\n        success_message = {\n            \"Success\": \"Successfully logged in\",\n            \"data\": serializer.data\n        }\n\n        return Response(success_message, status=status.HTTP_200_OK)\n\n\ndef get_asset(name):\n    try:\n        asset = Assets.objects.get(name=name)\n        return asset\n    except Assets.DoesNotExist:\n        return Response(\n            data={\"message\": \"Asset with name: {} does not exist\".format(name)},\n            status=status.HTTP_404_NOT_FOUND\n        )\n\n\nclass ListCreateAsset(generics.ListAPIView):\n    \"\"\"Provides a GET and POST method handler.\"\"\"\n    queryset = Assets.objects.all()\n    serializer_class = AssetSerializer\n\n    def post(self, request):\n        asset = Assets.objects.create(\n            asset_name=request.data[\"asset_name\"],\n            location=request.data[\"location\"],\n            operator=request.data[\"operator\"],\n            processor=request.data[\"processor\"]\n        )\n        return Response(data=AssetSerializer(asset).data, status=status.HTTP_201_CREATED)\n\n\nclass AssetDetailView(generics.RetrieveUpdateDestroyAPIView):\n    queryset = Assets.objects.all()\n    serializer_class = AssetSerializer\n\n    def get(self, request, *args, **kwargs):\n        try:\n            asset = self.queryset.get(pk=kwargs[\"pk\"])\n            return Response(AssetSerializer(asset).data)\n        except Assets.DoesNotExist:\n            return Response(\n                data={\"message\": \"Asset with id: {} does not exist\".format(kwargs[\"pk\"])},\n                status=status.HTTP_404_NOT_FOUND\n            )\n\n    def put(self, request, *args, **kwargs):\n        try:\n            asset = self.queryset.get(pk=kwargs[\"pk\"])\n            serializer = AssetSerializer()\n            update_asset = serializer.update(asset, request.data)\n            return Response(AssetSerializer(update_asset).data)\n        except Assets.DoesNotExist:\n            return Response(\n                data={\"message\": \"Asset with id: {} does not exist\".format(kwargs[\"pk\"])},\n                status=status.HTTP_404_NOT_FOUND\n            )\n\n    def delete(self, request, *args, **kwargs):\n        try:\n            asset = self.queryset.get(pk=kwargs[\"pk\"])\n            asset.delete()\n            return Response(status=status.HTTP_204_NO_CONTENT)\n        except Assets.DoesNotExist:\n            return Response(\n                data={\"message\": \"Asset with id: {} does not exist\".format(kwargs[\"pk\"])},\n                status=status.HTTP_404_NOT_FOUND\n            )\n\n\nclass ListCreateGroupsView(generics.ListAPIView):\n    queryset = Groups.objects.all()\n    serializer_class = GroupSerializer\n\n    def post(self, request, *args, **kwargs):\n        group = Groups.objects.create(\n            group_name=request.data[\"group_name\"]\n        )\n        return Response(\n            data=GroupSerializer(group).data,\n            status=status.HTTP_201_CREATED\n        )\n\n\nclass GroupDetailView(generics.RetrieveUpdateDestroyAPIView):\n    queryset = Groups.objects.all()\n    serializer_class = GroupSerializer\n\n    def get(self, request, *args, **kwargs):\n        try:\n            group = self.queryset.get(pk=kwargs[\"pk\"])\n            return Response(GroupSerializer(group).data)\n        except Groups.DoesNotExist:\n            return Response(\n                data={\"message\": \"Group with id: {} does not exist\".format(kwargs[\"pk\"])},\n                status=status.HTTP_404_NOT_FOUND)\n\n    def put(self, request, *args, **kwargs):\n        try:\n            group = self.queryset.get(pk=kwargs[\"pk\"])\n            serializer = GroupSerializer()\n            update_group = serializer.update(group, request.data)\n            return Response(GroupSerializer(update_group).data)\n        except Groups.DoesNotExist:\n            
return Response(\n                data={\"message\": \"Group with id: {} does not exist\".format(kwargs[\"pk\"])},\n                status=status.HTTP_404_NOT_FOUND)\n\n    def delete(self, request, *args, **kwargs):\n        try:\n            group = self.queryset.get(pk=kwargs[\"pk\"])\n            group.delete()\n            return Response(status=status.HTTP_204_NO_CONTENT)\n        except Groups.DoesNotExist:\n            return Response(\n                data={\"message\": \"Group with id: {} does not exist\".format(kwargs[\"pk\"])},\n                status=status.HTTP_404_NOT_FOUND)\n\n\nclass ListCreateProcessView(generics.ListAPIView):\n    queryset = Process.objects.all()\n    serializer_class = ProcessSerializer\n\n    def post(self, request, *args, **kwargs):\n        process = Process.objects.create(\n            process_name=request.data[\"process_name\"],\n            owner=request.data[\"owner\"]\n        )\n        return Response(\n            data=ProcessSerializer(process).data,\n            status=status.HTTP_201_CREATED\n        )\n\n\nclass ProcessDetailView(generics.RetrieveUpdateDestroyAPIView):\n    queryset = Process.objects.all()\n    serializer_class = ProcessSerializer\n\n    def get(self, request, *args, **kwargs):\n        try:\n            process = self.queryset.get(pk=kwargs[\"pk\"])\n            return Response(ProcessSerializer(process).data)\n        except Process.DoesNotExist:\n            return Response(\n                data={\"message\": \"Process with id: {} does not exist\".format(kwargs[\"pk\"])},\n                status=status.HTTP_404_NOT_FOUND)\n\n    def put(self, request, *args, **kwargs):\n        try:\n            process = self.queryset.get(pk=kwargs[\"pk\"])\n            serializer = ProcessSerializer()\n            update_process = serializer.update(process, request.data)\n            return Response(ProcessSerializer(update_process).data)\n        except Process.DoesNotExist:\n            return Response(\n                data={\"message\": \"Process with id: {} does not exist\".format(kwargs[\"pk\"])},\n                status=status.HTTP_404_NOT_FOUND)\n\n    def delete(self, request, *args, **kwargs):\n        try:\n            process = self.queryset.get(pk=kwargs[\"pk\"])\n            process.delete()\n            return Response(status=status.HTTP_204_NO_CONTENT)\n        except Process.DoesNotExist:\n            return Response(\n                data={\"message\": \"Process with id: {} does not exist\".format(kwargs[\"pk\"])},\n                status=status.HTTP_404_NOT_FOUND)\n\n\nclass ListCreateDataClassificationView(generics.ListAPIView):\n    queryset = DataClassification.objects.all()\n    serializer_class = DataClassificationSerializer\n\n    def post(self, request, *args, **kwargs):\n        data_classification = DataClassification.objects.create(\n            data_name=request.data[\"data_name\"],\n            description=request.data[\"description\"]\n        )\n        return Response(\n            data=DataClassificationSerializer(data_classification).data,\n            status=status.HTTP_201_CREATED\n        )\n\n\nclass DataClassificationDetailView(generics.RetrieveUpdateDestroyAPIView):\n    queryset = DataClassification.objects.all()\n    serializer_class = DataClassificationSerializer\n\n    def get(self, request, *args, **kwargs):\n        try:\n            dataClassification = self.queryset.get(pk=kwargs[\"pk\"])\n            return Response(DataClassificationSerializer(dataClassification).data)\n        except DataClassification.DoesNotExist:\n            return Response(\n                data={\"message\": \"Data Classification with id: {} does not exist\".format(kwargs[\"pk\"])},\n                status=status.HTTP_404_NOT_FOUND)\n\n    def put(self, request, *args, **kwargs):\n        try:\n            dataClassification = self.queryset.get(pk=kwargs[\"pk\"])\n            serializer = DataClassificationSerializer()\n            update_data = serializer.update(dataClassification, request.data)\n            return Response(DataClassificationSerializer(update_data).data)\n        except DataClassification.DoesNotExist:\n            return Response(\n                data={\"message\": \"Data Classification with id: {} does not 
exist\".format(kwargs[\"pk\"])},\n status=status.HTTP_404_NOT_FOUND)\n\n def delete(self, request, *args, **kwargs):\n try:\n dataClassification = self.queryset.get(pk=kwargs[\"pk\"])\n dataClassification.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n except DataClassification.DoesNotExist:\n return Response(\n data={\"message\": \"Data Classification with id: {} does not exist\".format(kwargs[\"pk\"])},\n status=status.HTTP_404_NOT_FOUND)\n\n\nclass ListCreateDataMapsView(generics.ListAPIView):\n queryset = DataMaps.objects.all()\n serializer_class = DataMapSerializer\n\n def post(self, request, *args, **kwargs):\n data_maps = DataMaps.objects.create(\n image=request.data[\"image\"],\n name=request.data[\"name\"]\n )\n return Response(\n data=DataMapSerializer(data_maps).data,\n status=status.HTTP_201_CREATED\n )\n\n\nclass DataMapsDetailView(generics.RetrieveUpdateDestroyAPIView):\n queryset = DataMaps.objects.all()\n serializer_class = DataMapSerializer\n\n def get(self, request, *args, **kwargs):\n try:\n data_maps = self.queryset.get(pk=kwargs[\"pk\"])\n return Response(DataMapSerializer(data_maps).data)\n except DataMaps.DoesNotExist:\n return Response(\n data={\"message\": \"Data Maps with id: {} does not exist\".format(kwargs[\"pk\"])},\n status=status.HTTP_404_NOT_FOUND)\n\n def put(self, request, *args, **kwargs):\n try:\n data_maps = self.queryset.get(pk=kwargs[\"pk\"])\n serializer = DataMapSerializer()\n update_maps = serializer.update(data_maps, request.data)\n return Response(DataMapSerializer(update_maps).data)\n except DataMaps.DoesNotExist:\n return Response(\n data={\"message\": \"Data Maps with id: {} does not exist\".format(kwargs[\"pk\"])},\n status=status.HTTP_404_NOT_FOUND)\n\n def delete(self, request, *args, **kwargs):\n try:\n data_maps = self.queryset.get(pk=kwargs[\"pk\"])\n data_maps.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n except DataMaps.DoesNotExist:\n return Response(\n data={\"message\": \"Data Maps with id: {} does not exist\".format(kwargs[\"pk\"])},\n status=status.HTTP_404_NOT_FOUND)\n\n\nclass ListCreateDataItemsView(generics.ListAPIView):\n queryset = DataItems.objects.all()\n serializer_class = DataItemsSerializer\n\n def post(self, request, *args, **kwargs):\n item = DataItems.objects.create(\n name=request.data[\"name\"],\n )\n return Response(\n data=DataItemsSerializer(item).data,\n status=status.HTTP_201_CREATED\n )\n\n\nclass DataItemsDetailView(generics.RetrieveUpdateDestroyAPIView):\n queryset = DataItems.objects.all()\n serializer_class = DataItemsSerializer\n\n def get(self, request, *args, **kwargs):\n try:\n item = self.queryset.get(pk=kwargs[\"pk\"])\n return Response(DataItemsSerializer(item).data)\n except DataItems.DoesNotExist:\n return Response(\n data={\"message\": \"Data item with id: {} does not exist\".format(kwargs[\"pk\"])},\n status=status.HTTP_404_NOT_FOUND)\n\n def put(self, request, *args, **kwargs):\n try:\n item = self.queryset.get(pk=kwargs[\"pk\"])\n serializer = DataItemsSerializer()\n update_item = serializer.update(item, request.data)\n return Response(DataItemsSerializer(update_item).data)\n except DataItems.DoesNotExist:\n return Response(\n data={\"message\": \"Data item with id: {} does not exist\".format(kwargs[\"pk\"])},\n status=status.HTTP_404_NOT_FOUND)\n\n def delete(self, request, *args, **kwargs):\n try:\n item = self.queryset.get(pk=kwargs[\"pk\"])\n item.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n except DataItems.DoesNotExist:\n return 
Response(\n data={\"message\": \"Data item with id: {} does not exist\".format(kwargs[\"pk\"])},\n status=status.HTTP_404_NOT_FOUND)\n\n\nclass ListCreateDataSubjectView(generics.ListAPIView):\n queryset = DataSubject.objects.all()\n serializer_class = DataSubjectsSerializer\n\n def post(self, request, *args, **kwargs):\n data_subject = DataSubject.objects.create(\n name=request.data[\"name\"],\n )\n return Response(\n data=DataSubjectsSerializer(data_subject).data,\n status=status.HTTP_201_CREATED\n )\n\n\nclass DataSubjectDetailView(generics.RetrieveUpdateDestroyAPIView):\n queryset = DataSubject.objects.all()\n serializer_class = DataSubjectsSerializer\n\n def get(self, request, *args, **kwargs):\n try:\n data_subject = self.queryset.get(pk=kwargs[\"pk\"])\n return Response(DataSubjectsSerializer(data_subject).data)\n except DataSubject.DoesNotExist:\n return Response(\n data={\"message\": \"Data subject with id: {} does not exist\".format(kwargs[\"pk\"])},\n status=status.HTTP_404_NOT_FOUND)\n\n def put(self, request, *args, **kwargs):\n try:\n data_subject = self.queryset.get(pk=kwargs[\"pk\"])\n serializer = DataSubjectsSerializer()\n update_subject = serializer.update(data_subject, request.data)\n return Response(DataSubjectsSerializer(update_subject).data)\n except DataSubject.DoesNotExist:\n return Response(\n data={\"message\": \"Data subject with id: {} does not exist\".format(kwargs[\"pk\"])},\n status=status.HTTP_404_NOT_FOUND)\n\n def delete(self, request, *args, **kwargs):\n try:\n data_subject = self.queryset.get(pk=kwargs[\"pk\"])\n data_subject.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n except DataSubject.DoesNotExist:\n return Response(\n data={\"message\": \"Data subject with id: {} does not exist\".format(kwargs[\"pk\"])},\n status=status.HTTP_404_NOT_FOUND)\n\n\nclass ListCreateSubjectSourceView(generics.ListAPIView):\n queryset = SubjectSource.objects.all()\n serializer_class = SubjectSourceSerializer\n\n def post(self, request, *args, **kwargs):\n\n subject = DataSubject.objects.get(pk=kwargs.get('sub_pk'))\n source = Assets.objects.get(pk=kwargs.get('src_pk'))\n serializer = SubjectSourceSerializer(\n data={\"subject\": subject.id, \"source\": source.id, \"name\": request.data.get('name')})\n if serializer.is_valid():\n data = serializer.save(name=request.data.get('name'),\n subject=subject, source=source)\n return Response(data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n # def post(self, request, *args, **kwargs):\n # serializer = SubjectSourceSerializer(data=request.data,\n # context={'request': request})\n # subject_serializer = DataSubjectsSerializer(data={}, context={'request': request})\n # source_serializer = AssetSerializer(data={}, context={'request': request})\n # if serializer.is_valid() and subject_serializer.is_valid() and source_serializer.is_valid():\n # subject = subject_serializer.save()\n # source = source_serializer.save()\n # serializer.save(data_subject=subject,\n # data_source=source\n # )\n # data = {\n # 'status': 'Success',\n # 'data': serializer.data\n # }\n # return Response(data, status=status.HTTP_201_CREATED)\n # data = {\n # 'status': 'error',\n # 'data': serializer.errors,\n # 'message': \"Kindly correct above errors\"\n # }\n # return Response(data, 
status=status.HTTP_400_BAD_REQUEST)\n","repo_name":"mmosoroohh/I_hub","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8165488132","text":"#-*- encoding: utf-8 -*-\nimport unittest\nimport random\n\ndef multiplied_sum(values) :\n ret = 0\n for index, value in enumerate(values) :\n ret = ret + value * (index+1)\n return ret\n\ndef multiplied_sum_diff(values, from_index, to_index) :\n diff = 0\n if to_index < from_index :\n diff = sum(values[to_index:from_index]) - values[from_index] * (from_index-to_index)\n elif to_index > from_index :\n diff = values[from_index] * (to_index - from_index) - sum(values[from_index+1:to_index+1])\n return diff\n\ndef max_after_shift(length, values) :\n ori_multiplied_sum = multiplied_sum(values)\n diff = 0\n for index, value in enumerate(values) :\n for left_index in range(0, index):\n newdiff = multiplied_sum_diff(values, index, left_index)\n if newdiff > diff :\n print(index,left_index,newdiff)\n diff = newdiff\n for right_index in range(index, length):\n newdiff = multiplied_sum_diff(values, index, right_index)\n if newdiff > diff :\n print(index,right_index,newdiff)\n diff = newdiff\n return ori_multiplied_sum + diff\n\nif __name__ == '__main__' :\n length = 4\n values = [4,3,2,5]\n print(max_after_shift(length, values))\n\n\nclass shift_test(unittest.TestCase) :\n def test_multiplied_sum(self) :\n self.assertEqual(multiplied_sum([4,3,2,5]), 36)\n def test_ex1(self):\n self.assertEqual(max_after_shift(4,[4,3,2,5]), 39)\n def test_ex2(self):\n self.assertEqual(max_after_shift(5,[1,1,2,7,1]), 49)\n # def test_gen(self):\n # [ random.randrange(0,1000000) for i in range(200000) ]\n #def test_huge(self) :\n # print (max_after_shift(self.huge_sample_length, self.huge_sample ) )\n","repo_name":"liketaehoon/algorithm-quiz","sub_path":"acmicpc.net/14180/bf.py","file_name":"bf.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11216540109","text":"#! 
/usr/bin/env python3\n\nimport os\nfrom os import listdir\nimport requests\n\n\nurl = \"http://34.71.218.134/fruits/\"\ndesc_dir = \"supplier-data/descriptions/\"\nlist_files = [f for f in listdir(desc_dir)]\n\nfor file in list_files:\n\n    types = [\"name\",\"weight\",\"description\",\"image_name\"]\n    data = {}\n    with open(desc_dir + file, \"r\") as txtfile:\n        x = 0\n        for line in txtfile:\n            data[types[x]] = line.rstrip('\\n')\n            x += 1\n    # \"500 lbs\" -> 500; take the numeric part, since str.strip removes characters, not a suffix\n    data[\"weight\"] = int(data[\"weight\"].split()[0])\n    # swap the .txt extension for .jpeg instead of stripping characters with str.strip\n    data[\"image_name\"] = os.path.splitext(file)[0] + '.jpeg'\n    print(data)\n    response = requests.post(url,json=data)\n    if response.status_code != 201:\n        print('Something went wrong')\n\n","repo_name":"PedroMou1/Week-4-python-Google-certificate-last-course","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72036296806","text":"\"\"\"\nalien.py object\n\"\"\"\nimport os\nimport pygame\nfrom pygame.sprite import Sprite\nfrom bullet import Bullet\nimport random\n\n\nclass Alien(Sprite):\n    \"\"\"docstring for Alien.\"\"\"\n\n    def __init__(self, ui_settings, all_sprites, alien_bullets):\n        super().__init__()\n        self.ui_settings = ui_settings\n        self.all_sprites = all_sprites\n        self.alien_bullets = alien_bullets\n        self.file_name = str(random.randrange(5, 9)) + '.png'\n        self.image = pygame.image.load(os.path.join(\n            ui_settings.images_path, self.file_name)).convert_alpha()\n        # self.image = pygame.transform.scale(self.image,(40, 40))\n        # self.image.set_colorkey(ui_settings.BLACK)\n        # self.effects = pygame.mixer.Sound(os.path.join(ui_settings.sfx_path, 'expl1.ogg'))\n        # self.effects.set_volume(0.1)\n        self.rect = self.image.get_rect()\n        self.radius = int(self.rect.width * .8 / 2)\n        # pygame.draw.circle(self.image, self.ui_settings.GREEN, self.rect.center, self.radius )\n        self.rect.x = self.rect.width\n        self.rect.y = self.rect.height\n        self.speedy = 1\n        self.speedx = 1\n        self.power = 1\n        self.last_update = pygame.time.get_ticks()\n        self.frame_rate = 1000\n\n    def update(self):\n        # self.rect.y += self.speedy\n        self.rect.x += self.speedx\n        now = pygame.time.get_ticks()\n        if now - self.last_update > self.frame_rate:\n            self.last_update = now\n            self.shoot()\n        # self.rect.x += self.speedx\n        # if self.rect.top > self.ui_settings.HEIGHT + 10 or self.rect.left < -10 or self.rect.right > self.ui_settings.WIDTH + 10:\n        #     for n in range(5):\n        #         self.rect.x = self.rect.width\n        #         self.rect.y = self.rect.height\n        #         self.speedy = 1\n        if self.rect.right >= self.ui_settings.WIDTH:\n            self.speedx = -1\n            self.rect.y += 1\n        elif self.rect.left <= 0:\n            self.speedx = 1\n            self.rect.y += 1\n\n    def shoot(self):\n        # self.bullets = []\n        bullet = Bullet(self.ui_settings, self.rect.centerx, self.rect.bottom, 10)\n        bullet.effects.play()\n        self.all_sprites.add(bullet)\n        self.alien_bullets.add(bullet)\n\n    # def check_edge(self):\n    #     if self.rect.right >= self.ui_settings.WIDTH:\n    #         self.speedx = -1\n    #     elif self.rect.left <= 0:\n    #         self.speedx = 1\n","repo_name":"daviz888/pygame_code","sub_path":"alien.py","file_name":"alien.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15864483419","text":"class Solution:\r\n\tdef isNumber(self, s: str) -> bool: \r\n\t\texp = False\r\n\t\texp_dig = False\r\n\t\tdig = False\r\n\t\tdec = False\r\n\t\tprev_char = 
None\r\n\t\tif not s: return False\r\n\r\n\t\tfor char in s: \r\n\t\t\tif char == \"+\" or char == \"-\":\r\n\t\t\t\tif prev_char != None and prev_char != \"E\" and prev_char != \"e\": return False\r\n\t\t\t\tprev_char = char\r\n\r\n\t\t\telif char == \".\":\r\n\t\t\t\tif dec == True or exp == True: return False\r\n\t\t\t\tdec = True\r\n\t\t\t\tprev_char = char\r\n\r\n\t\t\telif char == \"E\" or char == \"e\":\r\n\t\t\t\tif exp == True or dig == False: return False\r\n\t\t\t\texp = True\r\n\t\t\t\tprev_char = char\r\n\r\n\t\t\telif char.isnumeric():\r\n\t\t\t\tprev_char = char\r\n\t\t\t\tif exp == True: exp_dig = True\r\n\t\t\t\tdig = True\r\n\r\n\t\t\telse:\r\n\t\t\t\treturn False\r\n\t\tif dig == False: return False\r\n\t\tif exp == True and exp_dig == False: return False\r\n\r\n\r\n\t\treturn True\r\n","repo_name":"Jahnavi-Chunduru/-CrackYourInternship","sub_path":"valid number 49.py","file_name":"valid number 49.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"4548360981","text":"import numpy as np\nimport pandas as pd\nimport sklearn.cluster as cluster\nimport matplotlib.pyplot as plt\nfrom decimal import *\nimport gzip\nimport os\nfrom PIL import Image\nimport glob\n\nIMAGE_SIZE = 28\nFEATURE_SIZE = IMAGE_SIZE * IMAGE_SIZE + 1\nNUM_CLASS = 10\nLAMBDA = 0.001\n\n# extract the data from the file and insert 1 for the bias term\ndef extract_data(filename, num_images):\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float16)\n data = data.reshape(num_images, IMAGE_SIZE * IMAGE_SIZE)\n # normalise data\n data = normalise_data(data)\n # add 1 in the beginning for bias term\n data = np.insert(data, 0, 1, axis=1)\n return data\n\n# normalise the image data to lie between 0 to 1\ndef normalise_data(data):\n arr = data.reshape(data.shape[0] * data.shape[1],1)\n max = np.max(arr)\n min = np.min(arr)\n diff = max - min;\n x = arr / diff\n return x.reshape(data.shape)\n\n# data normalisation for USPS data \n# the background is white for USPS\n# the background is black for MNIST\ndef normalise_data_abs(data):\n arr = data.reshape(data.shape[0] * data.shape[1],1)\n max = np.max(arr)\n min = np.min(arr)\n diff = max - min;\n x = abs(arr - max) / diff\n return x.reshape(data.shape)\n\n# extract the labels of the input data from input file\ndef extract_label(filename, num_images):\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int16)\n return labels\n\n# extract USPS data\ndef extract_usps_data():\n files = [f for f in glob.glob(\"/Users/manpreetdhanjal/Downloads/proj3_images/Numerals/*[0-9]\")]\n\n data =np.empty((1, 784))\n usps_labels_training = []\n for f in files:\n img = glob.glob(f +\"/*.png\")\n \n for i in img:\n arr = get_image_data(i)\n data = np.append(data,arr,axis=0)\n usps_labels_training.append(int(f.split(\"/\")[-1]))\n print('data is',(data).shape)\n\n data = data[1:,:]\n data = normalise_data_abs(data)\n data = np.insert(data, 0, 1, axis=1)\n val = usps_labels_training\n usps_labels_training = np.reshape(usps_labels_training,(len(usps_labels_training),1))\n\n return (data, usps_labels_training)\n\ndef get_image_data(filename):\n imgData = (Image.open(filename).convert('L'))\n imgData = imgData.resize((28,28),Image.ANTIALIAS)\n imgData = 
np.asarray(imgData)\n s = 28 * 28\n img_wide = imgData.reshape(1, s)\n return img_wide\n\n# applies the softmax and returns array with probabilities of all classes\ndef softmax(prediction):\n exp_vec = np.exp(prediction)\n res_exp_vec = np.zeros((prediction.shape[0],prediction.shape[1]))\n for i in range(prediction.shape[0]):\n res_exp_vec[i,:] = exp_vec[i,:]/np.sum(exp_vec[i,:])\n return res_exp_vec\n\n# convert the input labels to Nx10 vector representing the probability for each class\ndef format_labels(labels):\n result = np.zeros((labels.size,NUM_CLASS))\n result[np.arange(result.shape[0]), labels] = 1\n return result\n\n# tunes the hyperparameters for neural networks\n# trains the NN for various values of eta, lambda and number of nodes\n# tests on validation dataset\ndef tune_parameters(train_input, train_label): \n # grid search\n eta_arr = [0.03, 0.1, 0.3]\n lambda_arr = [0.01, 0.03, 0.1, 0.3]\n hidden_node_num_arr = [100, 300, 500, 700]\n minibatch_size = 10000\n \n # optimal accuracy\n opt_accuracy = 0\n \n # divide into training & validation set\n validation_start = np.round(int(train_input.shape[0] * 0.9))\n training_input = train_input[0:validation_start-1,:]\n training_output = train_label[0:validation_start-1]\n validation_input = train_input[validation_start:train_input.shape[0],:]\n validation_output = train_label[validation_start:train_input.shape[0]]\n \n accuracy_arr = np.zeros((64,1))\n i=0\n for learning_param in eta_arr:\n for lambda_param in lambda_arr:\n for num_hidden_nodes in hidden_node_num_arr:\n # train the NN for given parameters\n [weight_1, weight_2] = train_neural_network(training_input, training_output, minibatch_size, \n learning_param, lambda_param, num_hidden_nodes)\n \n # test on validation set\n accuracy = test_neural_network(weight_1, weight_2, validation_input, validation_output)\n print(accuracy)\n accuracy_arr[i,0] = accuracy\n i=i+1\n if accuracy > opt_accuracy:\n eta_opt = learning_param\n lambda_opt = lambda_param\n num_nodes_opt = num_hidden_nodes\n \n return (eta_opt, lambda_opt, num_nodes_opt, accuracy_arr)\n \n# train neural network\ndef train_neural_network(training_input, train_labels, minibatch_size, learning_param, lambda_param, num_hidden_nodes):\n \n # array to save the accuracy for each iteration\n accuracy = np.zeros((1000, 1))\n # format labels to Nx10 matrix\n training_output = format_labels(train_labels)\n \n # parameters\n total_iterations = 1000\n i = 0\n output_nodes = 10\n \n # randomly initiate the weights for the two layers\n weight_1 = np.random.normal(loc=0, scale=0.3, size = (FEATURE_SIZE, num_hidden_nodes))\n weight_2 = np.random.normal(loc=0, scale=0.3, size = (num_hidden_nodes + 1, output_nodes))\n \n N = training_input.shape[0]\n \n while i < total_iterations:\n # train in minibatches - SGD\n for j in range(int(N/minibatch_size)):\n # get the limits for the input and outputs\n lower_bound = j * minibatch_size \n upper_bound = min((j+1)*minibatch_size, N)\n \n train_batch_input = training_input[lower_bound:upper_bound,:]\n train_batch_output = training_output[lower_bound:upper_bound,:]\n \n #60000x785 785x50 = 60000x50\n layer_1_prediction = np.matmul(train_batch_input, weight_1)\n # applying activation function to it, 60000x50\n layer_1_value = np.tanh(layer_1_prediction)\n #layer_1_value = relu(layer_1_prediction)\n # insert 1 for bias 60000x51\n layer_1_value_bias = np.insert(layer_1_value, 0, 1, axis=1)\n\n # 60000x51 51x10 = 60000x10\n layer_2_prediction = np.matmul(layer_1_value_bias, weight_2)\n exp_vec = 
softmax(layer_2_prediction)\n\n            # 60000x10\n            del_2 = exp_vec - train_batch_output\n            # 60000x51 60000x10 = 51x10\n            E_2 = np.matmul(layer_1_value_bias.T, del_2)\n            # regularize with the tuned lambda_param (not the global LAMBDA), skipping the bias row\n            temp = lambda_param * weight_2\n            temp[0,:] = 0\n            weight_2 = weight_2 - learning_param/N * (E_2 + temp)\n\n            # 60000x50 60000x10 51x10\n            del_1 = (1 - np.square(layer_1_value)) * (np.matmul(del_2, weight_2[1:,:].T))\n            # 60000x785 60000x50 = 785x50\n            E_1 = np.matmul(train_batch_input.T, del_1)\n            temp = lambda_param * weight_1\n            temp[0,:] = 0\n            weight_1 = weight_1 - learning_param/N * (E_1 + temp)\n        \n        # calculate and save accuracy for each iteration\n        accuracy[i,0] = test_neural_network(weight_1, weight_2, training_input, train_labels)\n        print(accuracy[i,0])\n        i=i+1\n\n    return (weight_1, weight_2, accuracy)\n\n# returns accuracy of the trained model on given data\ndef test_neural_network(weight_1, weight_2, test_input, test_output):\n    layer_1_prediction = np.tanh(np.matmul(test_input, weight_1))\n    layer_1_value_bias = np.insert(layer_1_prediction, 0, 1, axis=1)\n    layer_2_prediction = softmax(np.matmul(layer_1_value_bias, weight_2))\n    predicted_output = np.argmax(layer_2_prediction, axis=1)\n    corr = np.sum(np.equal(predicted_output,test_output))\n    return corr/test_output.shape[0]\n\n# returns accuracy on test and usps data\ndef print_all_accuracies(weight_1, weight_2, train_input, train_label, test_input, test_label, usps_input, usps_label):\n    # accuracy on train data\n    train_acc = test_neural_network(weight_1, weight_2, train_input, train_label)\n    # accuracy on test data\n    test_acc = test_neural_network(weight_1, weight_2, test_input, test_label)\n    # accuracy on usps data\n    usps_acc = test_neural_network(weight_1, weight_2, usps_input, usps_label)\n    \n    print(\"Accuracy on train data:\")\n    print(train_acc)\n    print(\"Accuracy on test data:\")\n    print(test_acc)\n    print(\"Accuracy on USPS data:\")\n    print(usps_acc)\n    \n    return train_acc, test_acc, usps_acc\n\n# run SNN\ndef SNN():\n    # extract MNIST data\n    test_input = extract_data('/Users/manpreetdhanjal/Downloads/t10k-images-idx3-ubyte.gz', 10000)\n    test_label = extract_label('/Users/manpreetdhanjal/Downloads/t10k-labels-idx1-ubyte.gz', 10000)\n    train_input = extract_data('/Users/manpreetdhanjal/Downloads/train-images-idx3-ubyte.gz', 60000)\n    train_label = extract_label('/Users/manpreetdhanjal/Downloads/train-labels-idx1-ubyte.gz', 60000)\n\n    # extract USPS data\n    usps_input, usps_label = extract_usps_data()\n\n    # tune hyper parameters using grid search\n    [eta_opt, lambda_opt, num_nodes_opt, accuracy_arr] = tune_parameters(train_input, train_label)\n\n    mini_batch_size = 10000\n    # train the neural network with the tuned hyper parameter values and keep the learned weights\n    [weight_1, weight_2, accuracy] = train_neural_network(train_input, train_label, mini_batch_size, eta_opt, lambda_opt, num_nodes_opt)\n\n    print_all_accuracies(weight_1, weight_2, train_input, train_label, test_input, test_label, usps_input, usps_label)\n\ndef main():\n    SNN()\n\nmain()","repo_name":"deepika1804/Classification-of-Handwritten-digits","sub_path":"proj3code_bp/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9818,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"40718450588","text":"\"\"\"\nEnsure that our basic site configuration has been applied\n\nThis is intended for automated scenarios such as a fresh database server should\nbe configured on first run but a newly-launched container should not make any\nchanges. 
For convenience with Docker, the default values for each command-line\nargument will be retrieved from the environment.\n\nTasks:\n1. Ensure that at least one admin user account exists. If not, a new one will be\n created but it will have an unusable password to force use of the password\n reset process.\n2. Ensure that the Sites framework has the intended site name & domain\n\"\"\"\n\nimport os\n\nfrom django.contrib.auth.models import User\nfrom django.contrib.sites.models import Site\nfrom django.core.management.base import BaseCommand\nfrom django.db.transaction import atomic\n\n\nclass Command(BaseCommand):\n help = \"Ensure that core site configuration has been applied\" # NOQA: A003\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--admin-username\",\n default=os.environ.get(\"CONCORDIA_ADMIN_USERNAME\", \"admin\"),\n help=\"Admin user's username (default=%(default)s)\",\n )\n parser.add_argument(\n \"--admin-email\",\n default=os.environ.get(\"CONCORDIA_ADMIN_EMAIL\", \"crowd@loc.gov\"),\n help=\"Admin user's email address (default=%(default)s)\",\n )\n parser.add_argument(\n \"--site-name\",\n default=os.environ.get(\"HOST_NAME\", \"example.com\"),\n help=\"Site name (default=%(default)s)\",\n )\n parser.add_argument(\n \"--site-domain\",\n default=os.environ.get(\"HOST_NAME\", \"example.com\"),\n help=\"Site domain (default=%(default)s)\",\n )\n\n @atomic\n def handle(self, *, admin_username, admin_email, site_name, site_domain, **options):\n user, user_created = User.objects.get_or_create(\n username=admin_username, defaults={\"email\": admin_email}\n )\n user.is_staff = user.is_superuser = True\n\n if user.email != admin_email:\n self.stdout.write(\n f\"Changing {admin_username} email from {user.email} to {admin_email}\"\n )\n user.email = admin_email\n\n if user_created:\n user.set_unusable_password()\n\n user.full_clean()\n user.save()\n\n if user_created:\n self.stdout.write(\n f\"Created superuser {admin_username} account for {admin_email}.\"\n \" Use the password reset form to change the unusable password.\"\n )\n\n if site_domain != \"example.com\":\n updated = Site.objects.update(name=site_name, domain=site_domain)\n if updated:\n self.stdout.write(\n f\"Configured site with name {site_name} and domain {site_domain}\"\n )\n","repo_name":"LibraryOfCongress/concordia","sub_path":"concordia/management/commands/ensure_initial_site_configuration.py","file_name":"ensure_initial_site_configuration.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","stars":145,"dataset":"github-code","pt":"52"} +{"seq_id":"8664039277","text":"import unittest\nimport os\nimport json\nfrom percolate import parse_file, split_line, parse_phone_number, parse_lines, format_one, format_two, format_three, format_phone_number\n\n__location__ = os.path.realpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__)))\n\nclass TestFormatOne(unittest.TestCase):\n\t\n\tdef test(self):\n\t\tline = ['Chandler', 'Kerri', '(623)-668-9293', 'pink', '12345']\n\t\tline_dict = {\n\t\t\t\"color\": \"pink\",\n\t\t\t\"firstname\": \"Kerri\",\n\t\t\t\"lastname\": \"Chandler\",\n\t\t\t\"phonenumber\": \"623-668-9293\",\n\t\t\t\"zipcode\": \"12345\"\n\t\t\t}\n\n\t\tself.assertEqual(\n\t\t\tformat_one(line), line_dict)\n\nclass TestFormatTwo(unittest.TestCase):\n\n\tdef test(self):\n\t\tline = ['James', 'Murphy', 'yellow', '83880', '018 154 6474']\n\t\tline_dict = {\n\t\t\t\"color\": \"yellow\",\n\t\t\t\"firstname\": \"James\",\n\t\t\t\"lastname\": 
\"Murphy\",\n\t\t\t\"phonenumber\": \"018-154-6474\",\n\t\t\t\"zipcode\": \"83880\"\n\t\t\t}\n\n\t\tself.assertEqual(\n\t\t\tformat_two(line), line_dict)\n\nclass TestFormatThree(unittest.TestCase):\n\tdef test(self):\n\t\tline = ['Booker T.', 'Washington', '87360', '373 781 7380', 'yellow']\n\t\tline_dict = {\n\t\t\t\"color\": \"yellow\",\n\t\t\t\"firstname\": \"Booker T.\",\n\t\t\t\"lastname\": \"Washington\",\n\t\t\t\"phonenumber\": \"373-781-7380\",\n\t\t\t\"zipcode\": \"87360\"\n\t\t\t}\n\n\t\tself.assertEqual(\n\t\t\tformat_three(line), line_dict)\n\nclass TestSplitLines(unittest.TestCase):\n\tdef test1(self):\n\t\tline = 'James Murphy, yellow, 83880, 018 154 6474'\n\t\tl = ['James', 'Murphy', 'yellow', '83880', '018 154 6474']\n\t\t\n\t\tself.assertEqual(split_line(line), l)\n\n\tdef test2(self):\n\t\tline = 'Booker T., Washington, 87360, 373 781 7380, yellow'\n\t\tl = ['Booker T.', 'Washington', '87360', '373 781 7380', 'yellow']\n\n\nclass TestParsePhoneNumber(unittest.TestCase):\n\tdef testFalse(self):\n\t\tself.assertEqual(\n\t\t\tparse_phone_number('123-12-1234'), False)\n\n\tdef testTrue(self):\n\t\tself.assertEqual(\n\t\t\tparse_phone_number('123 456 7890'), True)\n\nclass TestFormatPhoneNumber(unittest.TestCase):\n\tdef test(self):\n\t\tself.assertEqual(\n\t\t\tformat_phone_number('1231231234'), '123-123-1234')\n\nclass TestParseLines(unittest.TestCase):\n\n\tdef test_error(self):\n\t\t'''\n\t\tIn case of an error, parse_lines() should\n\t\treturn the line's index.\n\t\t'''\n\t\tline = split_line('Chandler, Kerri, (623)-668-9293, pink, 123123121')\n\t\tself.assertEqual(\n\t\t\tparse_lines(line), None)\n\n\tdef test_format_one(self):\n\t\tline = split_line('Chandler, Kerri, (623)-668-9293, pink, 12345')\n\n\t\tline_dict = {\n\t\t\t\"color\": \"pink\",\n\t\t\t\"firstname\": \"Kerri\",\n\t\t\t\"lastname\": \"Chandler\",\n\t\t\t\"phonenumber\": \"623-668-9293\",\n\t\t\t\"zipcode\": \"12345\"\n\t\t\t}\n\n\t\tself.assertEqual(\n\t\t\tparse_lines(line), line_dict)\n\n\tdef test_format_two(self):\n\t\tline = split_line('James Murphy, yellow, 83880, 018 154 6474')\n\t\tline_dict = {\n\t\t\t\"color\": \"yellow\",\n\t\t\t\"firstname\": \"James\",\n\t\t\t\"lastname\": \"Murphy\",\n\t\t\t\"phonenumber\": \"018-154-6474\",\n\t\t\t\"zipcode\": \"83880\"\n\t\t\t}\n\n\t\tself.assertEqual(\n\t\t\tparse_lines(line), line_dict)\n\n\tdef test_format_three(self):\n\t\tline = split_line('Booker T., Washington, 87360, 373 781 7380, yellow')\n\t\tline_dict = {\n\t\t\t\"color\": \"yellow\",\n\t\t\t\"firstname\": \"Booker T.\",\n\t\t\t\"lastname\": \"Washington\",\n\t\t\t\"phonenumber\": \"373-781-7380\",\n\t\t\t\"zipcode\": \"87360\"\n\t\t\t}\n\n\t\tself.assertEqual(\n\t\t\tparse_lines(line), line_dict)\n\nclass TestParseFile(unittest.TestCase):\n\tdef test(self):\n\t\tlines = ['Booker T., Washington, 87360, 373 781 7380, yellow',\n\t\t\t\t'Chandler, Kerri, (623)-668-9293, pink, 123123121',\n\t\t\t\t'James Murphy, yellow, 83880, 018 154 6474',\n\t\t\t\t'asdfawefawea']\n\n\t\texpected_output = {\n\t\t\t\t\t\"entries\": [\n\t\t\t\t\t{\n\t\t\t\t\t\"color\": \"yellow\",\n\t\t\t\t\t\"firstname\": \"James\",\n\t\t\t\t\t\"lastname\": \"Murphy\",\n\t\t\t\t\t\"phonenumber\": \"018-154-6474\",\n\t\t\t\t\t\"zipcode\": \"83880\"\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\"color\": \"yellow\",\n\t\t\t\t\t\"firstname\": \"Booker T.\",\n\t\t\t\t\t\"lastname\": \"Washington\",\n\t\t\t\t\t\"phonenumber\": \"373-781-7380\",\n\t\t\t\t\t\"zipcode\": \"87360\"\n\t\t\t\t\t}\n\t\t\t\t\t],\n\t\t\t\t\t\"errors\": 
[\n\t\t\t\t\t1,\n\t\t\t\t\t3\n\t\t\t\t\t]\n\t\t\t\t\t}\n\n\t\tself.assertEqual(parse_file(lines), expected_output)\n\n\n\n\nif __name__ == '__main__':\n\tunittest.main()","repo_name":"syurskyi/Python_Topics","sub_path":"115_testing/examples/Github/_Level_2/python_unittest_example-master/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3936,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"34315906361","text":"import sys\n\n\ndef calc_mem_usage(x, level=0):\n '''\n Фукнция подсчитывает рекурсивно сколько памяти занимает переменная\n :param x: переменная\n :param level: уровень расчета\n :return: объем занимаемой памяти\n '''\n res = sys.getsizeof(x)\n if hasattr(x, '__iter__'):\n if hasattr(x, 'items'):\n for key, value in x.items():\n res += calc_mem_usage(key, level + 1)\n res += calc_mem_usage(value, level + 1)\n elif not isinstance(x, str):\n for item in x:\n res += calc_mem_usage(item, level + 1)\n return res\n\n\ndef print_mem_usage(*args):\n '''\n Функция подсчитывает объем памяти, занимаемой переменными\n :param args: переменные для расчета\n :return: общий объем памяти, занимаемой переменными из args\n '''\n mem_usage = 0\n for v in args:\n mem_usage += calc_mem_usage(v)\n print(f'Всего занято {mem_usage} байт памяти')\n return mem_usage\n\n\ndef tracing_function(frame, event, arg):\n '''\n Фукнция трассирвки для профилирования кода\n :param frame: обязятельный аргумент - текущий кадр трассировки\n :param event: обязательный аргумент - событие 'call', 'line', 'return', 'exception', 'opcode'\n :param arg: обязательный аргумент - аргумент события\n :return: ссылка на себя\n '''\n if event == \"return\":\n # делаем видимой переменную для подсчета общего итога\n global mem_usage\n # расчитываем и выводим размер памяти, занимаемой переменными в контексте трассируемой функции\n for key in frame.f_locals.keys():\n size = calc_mem_usage(frame.f_locals[key])\n mem_usage += size\n print(f'{key} {type(frame.f_locals[key])}: {size}')\n return tracing_function\n\n# Оптимизируем код задания\n# 8. Вводятся три разных числа. 
Найти, какое из них является средним (больше одного, но меньше другого).\n\nprint('Введите три разных числа')\nn1 = int(input('Введите первое число: '))\nn2 = int(input('Введите второе число: '))\nn3 = int(input('Введите третье число: '))\n\n\ndef func_1(n1, n2, n3):\n m = n1\n if n1 == n2 or n1 == n3 or n3 == n2:\n print('Введены не разные числа')\n else:\n if n1 < n2 < n3 or n3 < n2 < n1:\n m = n2\n elif n2 < n3 < n1 or n1 < n3 < n2:\n m = n3\n print(f'Среднее число равно: {m}')\n\n\ndef func_2(n1, n2, n3):\n # упростим код, убрав переменную m - просто будем хранить среднее в n1\n\n if n1 == n2 or n1 == n3 or n3 == n2:\n print('Введены не разные числа')\n else:\n if n1 < n2 < n3 or n3 < n2 < n1:\n n1 = n2\n elif n2 < n3 < n1 or n1 < n3 < n2:\n n1 = n3\n print(f'Среднее число равно: {n1}')\n\n\nfunctions_list = [(func_1, (n1, n2, n3)), (func_2, (n1, n2, n3))]\n\nsys.settrace(tracing_function)\n\n# Вызовем тестируемые функции с трассирокой и сравним результаты\nfor func, args in functions_list:\n mem_usage = 0\n print(f'Расход памяти переменными в функции {func.__name__}:')\n sys.call_tracing(func, args)\n print(f'Всего занято {mem_usage} байт памяти')\n","repo_name":"MA32021/gbPythonAlgo","sub_path":"lesson-6.py","file_name":"lesson-6.py","file_ext":"py","file_size_in_byte":3998,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31583756569","text":"from .push_down_operation import PushDownOperation\nfrom oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass Sort(PushDownOperation):\n \"\"\"\n The information about the sort object.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new Sort object with values from keyword arguments. 
The default value of the :py:attr:`~oci.data_integration.models.Sort.model_type` attribute\n of this class is ``SORT`` and it should not be changed.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param model_type:\n The value to assign to the model_type property of this Sort.\n Allowed values for this property are: \"FILTER\", \"JOIN\", \"SELECT\", \"SORT\", \"QUERY\"\n :type model_type: str\n\n :param sort_clauses:\n The value to assign to the sort_clauses property of this Sort.\n :type sort_clauses: list[oci.data_integration.models.SortClause]\n\n \"\"\"\n self.swagger_types = {\n 'model_type': 'str',\n 'sort_clauses': 'list[SortClause]'\n }\n\n self.attribute_map = {\n 'model_type': 'modelType',\n 'sort_clauses': 'sortClauses'\n }\n\n self._model_type = None\n self._sort_clauses = None\n self._model_type = 'SORT'\n\n @property\n def sort_clauses(self):\n \"\"\"\n Gets the sort_clauses of this Sort.\n The sort clause.\n\n\n :return: The sort_clauses of this Sort.\n :rtype: list[oci.data_integration.models.SortClause]\n \"\"\"\n return self._sort_clauses\n\n @sort_clauses.setter\n def sort_clauses(self, sort_clauses):\n \"\"\"\n Sets the sort_clauses of this Sort.\n The sort clause.\n\n\n :param sort_clauses: The sort_clauses of this Sort.\n :type: list[oci.data_integration.models.SortClause]\n \"\"\"\n self._sort_clauses = sort_clauses\n\n def __repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n","repo_name":"oracle/oci-python-sdk","sub_path":"src/oci/data_integration/models/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","stars":345,"dataset":"github-code","pt":"52"} +{"seq_id":"39342830317","text":"\n#!/usr/bin/env python\n\nimport pygame\nfrom pygame.locals import * # noqa\nimport sys\nimport random\nimport time\n\n\nIMAGES = {} #dict for score digit font\n\nclass FlappyBird:\n def __init__(self):\n self.screen = pygame.display.set_mode((400, 720))\n self.bird = pygame.Rect(50, 50, 30, 41)\n self.offset = random.randint(-11, 11) * 10\n \n self.background = pygame.image.load(\"assets/background.png\").convert()\n self.birdSprites = [pygame.image.load(\"assets/1.png\").convert_alpha(),\n pygame.image.load(\"assets/2.png\").convert_alpha(),\n pygame.image.load(\"assets/dead.png\")]\n self.wallUp = pygame.image.load(\"assets/bottom.png\").convert_alpha()\n self.wallDown = pygame.image.load(\"assets/top.png\").convert_alpha()\n self.gap = 130\n self.wallx = 400\n self.birdY = 350\n self.q_value = {}\n self.jump = 0\n self.jumpSpeed = 30\n self.gravity = 5\n self.dead = False\n self.sprite = 0\n self.counter = 0\n self.upRect = pygame.Rect(self.wallx,\n 450 - self.offset,\n self.wallUp.get_width() - 10,\n self.wallUp.get_height())\n self.downRect = pygame.Rect(self.wallx,\n -230 - self.offset,\n self.wallDown.get_width() - 10,\n self.wallDown.get_height())\n IMAGES['numbers'] = (\n pygame.image.load('assets/numbers/0.png').convert_alpha(),\n pygame.image.load('assets/numbers/1.png').convert_alpha(),\n pygame.image.load('assets/numbers/2.png').convert_alpha(),\n pygame.image.load('assets/numbers/3.png').convert_alpha(),\n pygame.image.load('assets/numbers/4.png').convert_alpha(),\n pygame.image.load('assets/numbers/5.png').convert_alpha(),\n 
pygame.image.load('assets/numbers/6.png').convert_alpha(),\n pygame.image.load('assets/numbers/7.png').convert_alpha(),\n pygame.image.load('assets/numbers/8.png').convert_alpha(),\n pygame.image.load('assets/numbers/9.png').convert_alpha()\n )\n with open(\"q_value\") as f:\n for line in f:\n pair = line.split(\":\")\n self.q_value[eval(pair[0])] = eval(pair[1])\n \n\n \n def showScore(self, score):\n \"\"\"displays score in center of screen\"\"\"\n scoreDigits = [int(x) for x in list(str(score))]\n totalWidth = 0 # total width of all numbers to be printed\n\n for digit in scoreDigits:\n totalWidth += IMAGES['numbers'][digit].get_width()\n\n Xoffset = (400 - totalWidth) / 2\n\n for digit in scoreDigits:\n self.screen.blit(IMAGES['numbers'][digit], (Xoffset, 720 * 0.1))\n Xoffset += IMAGES['numbers'][digit].get_width()\n\n\n def colliBot(self, rect1, rect2):\n if rect1.right >= rect2.left and rect1.left <= rect2.right and (rect1.bottom >= rect2.top):\n #print rect1.bottom, rect2.top\n return True\n return False\n\n def colliTop(self, rect1, rect2):\n if rect1.right >= rect2.left and rect1.left <= rect2.right and (rect1.top <= rect2.bottom):\n #print rect1.top, rect2.bottom\n return True\n return False\n\n def birdUpdate(self):\n\n if self.dead:\n self.bird[1] = 50\n #self.birdY = 50\n self.dead = False\n self.counter = 0\n self.wallx = 400\n self.offset = random.randint(-11, 11) * 10\n self.gravity = 5\n else:\n if self.jump > 0:\n self.wallx -= 5\n self.jumpSpeed -= 5\n self.bird.top -= self.jumpSpeed\n self.jump -= 5\n else:\n self.wallx -= 5\n self.bird.top += self.gravity\n self.gravity += 5\n if self.wallx < -80:\n self.wallx = 400\n self.counter += 1\n self.offset = random.randint(-11, 11) * 10\n #self.bird[1] = self.birdY\n self.upRect = pygame.Rect(self.wallx,\n 450 - self.offset,\n self.wallUp.get_width() - 10,\n self.wallUp.get_height())\n self.downRect = pygame.Rect(self.wallx,\n -230 -self.offset,\n self.wallDown.get_width() - 10,\n self.wallDown.get_height())\n if self.colliBot(self.bird, self.upRect):\n self.dead = True\n #print 'down'\n #if self.upRect.colliderect(self.bird):\n # self.dead = True\n # print 'down'\n if self.colliTop(self.bird, self.downRect):\n self.dead = True\n #print 'up'\n #if self.downRect.colliderect(self.bird):\n # self.dead = True\n # print 'up'\n if not 0 < self.bird[1] < 720:\n self.dead = True\n def ui_update(self):\n self.birdUpdate()\n #self.updateWalls()\n\n self.screen.fill((255, 255, 255))\n self.screen.blit(self.background, (0, 0))\n #self.screen.blit(self.wallUp,(self.wallx, 360 + self.gap - self.offset))\n self.screen.blit(self.wallUp,(self.upRect.left, self.upRect.top - 10))\n #self.screen.blit(self.wallDown,(self.wallx, 0 - self.gap - self.offset))\n self.screen.blit(self.wallDown,(self.downRect.left, self.downRect.top))\n \n if self.dead:\n self.sprite = 2\n elif self.jump:\n self.sprite = 1\n self.screen.blit(self.birdSprites[self.sprite], (self.bird.left, self.bird.top))\n if not self.dead:\n self.sprite = 0\n\n \n #self.birdUpdate()\n #self.updateWalls()\n self.showScore(self.counter)\n\n pygame.display.update()\n \n \n def action_jump(self):\n self.jump = 30\n self.gravity = 5\n self.jumpSpeed = 30\n\n # ai_mode true for ai play otherwise for human play\n def run(self, ai_mode):\n clock = pygame.time.Clock()\n cur_max = 0;\n i = 0\n while True:\n clock.tick(180)\n \n if not ai_mode:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n if (event.type == pygame.KEYDOWN or event.type == 
pygame.MOUSEBUTTONDOWN) and not self.dead:\n                        self.action_jump()\n                self.ui_update()\n            else:\n                i += 1\n                pos_x = self.bird.right - self.upRect.right\n                pos_y = self.bird.bottom - self.upRect.top\n                cur_state = (pos_x, pos_y)\n                if cur_state not in self.q_value:\n                    self.q_value[cur_state] = [0.0, 0.0]\n                action = 0\n                \n                val = self.q_value[cur_state]\n                action = val.index(max(val))\n                if action == 1:\n                    self.action_jump()\n\n                self.ui_update()\n                next_pos_x = self.bird.right - self.upRect.right\n                next_pos_y = self.bird.bottom - self.upRect.top\n                next_state = (next_pos_x, next_pos_y)\n                reward = 1\n                if self.dead:\n                    reward = -1000\n                if next_state not in self.q_value:\n                    self.q_value[next_state] = [0.0, 0.0]\n\n                self.q_value[cur_state][action] += 0.7 * (reward + 1 * max(self.q_value[next_state]) - self.q_value[cur_state][action])\n\n                if self.counter > cur_max:\n                    cur_max = self.counter\n                    sys.stdout.write(\"\\rBest score so far: %d\" % cur_max)\n                    sys.stdout.flush()\n\n                # record q_value every 1000 iterations\n                if i == 1000:\n                    i = 0\n                    # text mode: the pairs are written as strings, so 'wb' would raise a TypeError\n                    with open('q_value', 'w') as f:\n                        for (k, v) in self.q_value.items():\n                            f.write('%s:%s\\n' % (k, v))\n\n\nif __name__ == \"__main__\":\n    FlappyBird().run(True)\n","repo_name":"zhy9036/FlappyBird_RL_Agent","sub_path":"flappybird.py","file_name":"flappybird.py","file_ext":"py","file_size_in_byte":8311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42014904243","text":"from flask import (jsonify,\n                   request,\n                   Response)\n\nfrom application.tasks.spiders import execute_spider\nfrom application.tasks.synchronization import execute_pipeline_task\nfrom application.db_extension.routines import python_dictionary_lookup\n\nfrom .helpers import seller_integration_bp\n\nfrom ..tasks import start_synchronization\n\n\n@seller_integration_bp.route('/lookup_attributes', methods=['POST'])\ndef route_lookup_attributes():\n    body = request.get_json()\n    sentence = body.get('text', '')\n    # category_id = int(body.get('category_id', 1))\n    attr_codes = body.get('attr_codes')\n    result = python_dictionary_lookup(None, sentence, attr_codes)\n    return jsonify(result)\n\n\n@seller_integration_bp.route('/reload_dictionary')\ndef route_reload_dictionary():\n    from application.db_extension.dictionary_lookup.lookup import dictionary_lookup\n    dictionary_lookup.update_dictionary_lookup_data()\n    return jsonify({'msg': 'dictionary reloaded'})\n\n\n@seller_integration_bp.route(\n    '/status'\n)\ndef index():\n    return jsonify(\n        {'status': 'ok'}\n    )\n\n\n@seller_integration_bp.route(\n    '/source/<int:source_id>/sync',\n    strict_slashes=False\n)\ndef sync(source_id: int) -> Response:\n    # content = request.get_json()\n    # callback_url = content['callback_url']\n    try:\n        full = request.args.get('full', 1)\n        full = bool(int(full))\n    except (TypeError, ValueError):\n        full = False\n    status = start_synchronization(source_id, full=full)\n    return jsonify(\n        {'data': {\n            'status': status,\n            # 'callback_url': callback_url\n        }\n        }\n    )\n\n\n@seller_integration_bp.route(\n    '/source/<int:source_id>/scrape/',\n)\ndef scrape(source_id: int) -> Response:\n    status = execute_spider(source_id)\n    return jsonify(\n        {\n            'data': {\n                'status': status,\n            }\n        }\n    )\n\n\n@seller_integration_bp.route(\n    '/source/<int:source_id>/execute_pipeline',\n)\ndef execute_pipeline(source_id: int) -> Response:\n    task = execute_pipeline_task.delay(([True], True), source_id)\n    return jsonify(\n        {'data': {'status': task.id}}\n    
)\n","repo_name":"Venus713/m3-project","sub_path":"application/views/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29098344701","text":"import argparse\nimport hashlib\nimport unittest\n\n\ndef partA(seed, target=5):\n start = '0' * target\n seed = seed.encode('utf-8')\n\n n = 1\n while True:\n m = hashlib.md5()\n m.update(seed)\n m.update('{}'.format(n).encode('utf-8'))\n h = m.hexdigest()\n if h.startswith(start):\n return n\n n += 1\n\ndef partB(seed, target):\n return partA(seed, target)\n\n\nclass TestProblem(unittest.TestCase):\n\n def test_mine(self):\n self.assertEqual(partA('abcdef', target=3), 3337)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('input')\n args = parser.parse_args()\n\n with open(args.input) as fh:\n seed = fh.read().strip()\n\n print(partA(seed))\n print(partB(seed, target=6))\n","repo_name":"gunnihinn/advent-of-code","sub_path":"2015/src/04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7896560759","text":"from collections import deque\nimport sys\ninput = sys.stdin.readline\n\nd = [(-1,0),(1,0),(0,-1),(0,1),-1,1]\ndef bfs():\n q = deque(starts_idx)\n # for h, i, j in starts_idx: # 탐색 열에 담긴 모든 곳에 방문 표시\n # visited[h][i][j] = 1\n\n while q:\n h, i, j = q.popleft()\n\n for k in range(6):\n # 상하좌우 검사\n if k < 4:\n ni = i + d[k][0]; nj = j + d[k][1]\n if 0 <= ni < N and 0 <= nj < M and not visited[h][ni][nj]:\n q.append([h, ni, nj])\n visited[h][ni][nj] = visited[h][i][j] + 1\n # 박스 아래 위 검사\n else:\n nh = h + d[k]\n if 0 <= nh < H and not visited[nh][i][j]:\n q.append([nh, i, j])\n visited[nh][i][j] = visited[h][i][j] + 1\n\nM, N, H = map(int, input().split()) # M: 행 수 / N: 열 수 / H : 상자 수\narr = [[list(map(int, input().split())) for _ in range(N)] for _ in range(H)]\n# 1은 익은 토마토, 0은 익지 않은 토마토, -1은 토마토 존재X\n# 상, 하, 좌, 우, 앞, 뒤 6방향으로 퍼짐\n\nstarts_idx = []\nvisited = [[[0]*M for _ in range(N)] for _ in range(H)]\nfor h in range(H):\n for i in range(N):\n for j in range(M):\n if arr[h][i][j] == 1: # 토마토가 있는 모든 지점 탐색 열에 추가\n starts_idx.append([h,i,j])\n visited[h][i][j] = 1 # 방문표시\n elif arr[h][i][j] == -1: # 토마토가 들어있지 않은 곳은 탐색하지 않기 위해\n visited[h][i][j] = 1 # 방문 표시\nbfs()\nans = 0\nisNotMatrue = False\n# 토마토 최종 상태 검사\nfor box in visited:\n for row in box:\n if min(row) == 0: # 안익은 토마토가 하나라도 있으면 종료 > ans=-1\n ans = -1\n isNotMatrue = True\n break\n ans = max(ans, max(row)-1) # 토마토 익을 때까지 걸리는 시간 구하기(visited 내 최댓값-1)\n if isNotMatrue:\n break\nprint(ans)","repo_name":"Going777/Algorithm","sub_path":"BOJ/Gold/7569_토마토.py","file_name":"7569_토마토.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13301255140","text":"# -*- coding: utf-8 -*-\n\nfrom b3j0f.utils.ut import UTCase\nfrom unittest import main\n\nfrom link.parallel.driver import Driver\n\n\nclass TestBaseDriver(UTCase):\n def test_map(self):\n drv = Driver()\n\n callback = lambda doc: doc\n expected = [1, 2, 3, 4]\n result = list(drv.map(callback, expected))\n\n self.assertEqual(result, expected)\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"linkdd/link.parallel","sub_path":"link/parallel/test/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28154340131","text":"import requests\r\nimport json\r\nimport time\r\nimport os\r\n\r\npath = os.getcwd()\r\nlogfile = path + os.path.sep + 'pinlog.txt'\r\n\r\ndef main():\r\n pinlog = []\r\n newlog = []\r\n\r\n #persistent means of avoiding duplicate pinning\r\n with open(logfile, 'r') as l:\r\n print('file opened')\r\n for line in l.readlines():\r\n pinlog.append(line)\r\n print(pinlog)\r\n\r\n #point to orbitdb instance\r\n r = requests.get('http://localhost:8080/list')\r\n\r\n response = json.loads(r.content)\r\n\r\n hash_list = response['Hash List']\r\n\r\n for hash in hash_list:\r\n match = False\r\n for pin in pinlog:\r\n if hash[\"content\"] in pin:\r\n match = True\r\n\r\n if match == False:\r\n print(f\"Calling IPFS API to pin: {hash['title']}\")\r\n\r\n #point to ipfs node\r\n p = requests.post(f'http://localhost:5001/api/v0/pin/add?arg={hash[\"content\"]}')\r\n\r\n print(p.content)\r\n newlog.append(f\"{hash['title']} - {hash['content']}\\n\")\r\n\r\n with open(logfile, 'a') as a:\r\n for item in newlog:\r\n a.write(item)\r\n\r\n\r\nopen(logfile, 'a').close()\r\n\r\nwhile True:\r\n main()\r\n #this was built with a content creator in mind that\r\n #only posts once a week so checking more than once\r\n #a day is aggressive but wasn't sure how it would\r\n #work or not work with such a long sleep interval\r\n time.sleep(43200)\r\n","repo_name":"black-lotus713/orbitdb_ipfs_pubsub_poc_client_service","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29191077064","text":"import json\nimport requests\nfrom app import configuration as C\n\n\nheaders = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/json',\n 'User-Agent': 'ZTP Server'\n}\n\n\ndef notify_slack(msg):\n url = 'https://hooks.slack.com/services/' + C.SLACK_TOKEN\n data = {}\n data['text'] = msg\n requests.post(url, headers=headers, data=json.dumps(data))\n","repo_name":"networklore/ztp-tutorial","sub_path":"tutorial/package/app/notifications.py","file_name":"notifications.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"52"} +{"seq_id":"8740268681","text":"# Problem 65\n# Convergents of e\n\nconstants = []\nfor x in range(1,40):\n constants.append(1)\n constants.append(2 * x)\n constants.append(1)\n\nnum = 1\nden = 1\ncount = 0\n\nfor x in range(97,-1,-1):\n new_num = den\n den = num + den*constants[x]\n num = new_num\n\nprint(f'num {num+den*2}\\n-------\\nden {den}\\n')\n\ndigArr = [int(dig) for dig in str(num+den*2)]\nprint(sum(digArr))\n","repo_name":"cavandervoort/Project-Euler-001-to-100","sub_path":"Euler_065.py","file_name":"Euler_065.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11337654668","text":"import os\n\nfrom flask import Blueprint, jsonify\n\nfrom app.services.graph import GraphService\n\nauth = Blueprint('auth', __name__)\n\n\n@auth.route('/auth/login')\ndef login():\n graph = GraphService()\n me = graph.get_me()\n try:\n id = me[\"id\"]\n except Exception as e:\n raise\n return 
jsonify({'result': id})\n\n@auth.route('/auth/signup')\ndef signup():\n return 'signed up'\n\n\n","repo_name":"robinsondotnet/pythonstuff","sub_path":"app/api/v1/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74661296163","text":"# First, import the required modules and set up the system environment:\r\n\r\nimport argparse\r\nimport os\r\nimport numpy as np\r\nimport math\r\nimport itertools\r\nimport sys\r\n\r\nimport torchvision.transforms as transforms\r\nfrom torchvision.utils import save_image, make_grid\r\n\r\nfrom torch.utils.data import DataLoader\r\nfrom torch.autograd import Variable\r\n\r\n# from models import *\r\n# from datasets import *\r\n\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch\r\n\r\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\" # make the GPUs visible to the system starting from GPU 1\r\ntorch.cuda.set_device(1)\r\nos.makedirs(\"images\", exist_ok=True)\r\nos.makedirs(\"saved_models\", exist_ok=True)\r\n\r\n# Then set the initial parameters and print them\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"--epoch\", type=int, default=0, help=\"epoch to start training from\")\r\nparser.add_argument(\"--n_epochs\", type=int, default=100, help=\"number of epochs of training\")\r\n# parser.add_argument(\"--dataset_name\", type=str, default=\"img_align_celeba\", help=\"name of the dataset\")\r\nparser.add_argument(\"--batch_size\", type=int, default=16, help=\"size of the batches\")\r\nparser.add_argument(\"--lr\", type=float, default=0.0002, help=\"adam: learning rate\")\r\nparser.add_argument(\"--b1\", type=float, default=0.5, help=\"adam: decay of first order momentum of gradient\")\r\nparser.add_argument(\"--b2\", type=float, default=0.999, help=\"adam: decay of second order momentum of gradient\")\r\nparser.add_argument(\"--decay_epoch\", type=int, default=100, help=\"epoch from which to start lr decay\")\r\nparser.add_argument(\"--n_cpu\",type=int, default=8, help=\"number of cpu threads to use during batch generation\")\r\nparser.add_argument(\"--hr_height\", type=int, default=500, help=\"high res. image height\")\r\nparser.add_argument(\"--hr_width\", type=int, default=500, help=\"high res. 
image width\")\r\nparser.add_argument(\"--channels\", type=int, default=1, help=\"number of image channels\")\r\nparser.add_argument(\"--sample_interval\", type=int, default=100, help=\"interval between saving image samples\")\r\nparser.add_argument(\"--checkpoint_interval\", type=int, default=10, help=\"interval between model checkpoints\")\r\nparser.add_argument('--gennum',type=str)\r\n\r\nopt = parser.parse_args()\r\n\r\nprint(opt)\r\nprint('gennum',opt.gennum)\r\n\r\n# cuda = torch.cuda.is_available()\r\n\r\n\r\nhr_shape = (opt.hr_height, opt.hr_width)\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch\r\n# from torchvision.models import vgg19\r\nfrom torchvision.models import densenet121\r\nimport math\r\n\r\n# 定义残差块\r\nclass ResidualBlock(nn.Module):\r\n def __init__(self, in_features):\r\n super(ResidualBlock, self).__init__() # in_features: 64\r\n self.conv_block = nn.Sequential(\r\n nn.Conv2d(in_features, in_features, kernel_size=3, stride=1, padding=1),\r\n nn.BatchNorm2d(in_features, 0.8),\r\n nn.PReLU(),\r\n nn.Conv2d(in_features, in_features, kernel_size=3, stride=1, padding=1),\r\n nn.BatchNorm2d(in_features, 0.8),\r\n )\r\n\r\n def forward(self, x):\r\n return x + self.conv_block(x)\r\n\r\n# 定义生成器\r\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\r\n \"\"\"3x3 convolution with padding\"\"\"\r\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\r\n padding=dilation, groups=groups, bias=False, dilation=dilation)\r\n\r\ndef conv1x1(in_planes, out_planes, stride=1):\r\n \"\"\"1x1 convolution\"\"\"\r\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\r\n\r\nclass BasicBlock(nn.Module):\r\n expansion = 1\r\n\r\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\r\n base_width=64, dilation=1, norm_layer=None):\r\n super(BasicBlock, self).__init__()\r\n if norm_layer is None:\r\n norm_layer = nn.BatchNorm2d\r\n if groups != 1 or base_width != 64:\r\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\r\n if dilation > 1:\r\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\r\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\r\n self.conv1 = conv3x3(inplanes, planes, stride)\r\n self.bn1 = norm_layer(planes)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.conv2 = conv3x3(planes, planes)\r\n self.bn2 = norm_layer(planes)\r\n self.downsample = downsample\r\n self.stride = stride\r\n\r\n def forward(self, x):\r\n identity = x\r\n\r\n out = self.conv1(x)\r\n out = self.bn1(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv2(out)\r\n out = self.bn2(out)\r\n\r\n if self.downsample is not None:\r\n identity = self.downsample(x)\r\n\r\n out += identity\r\n out = self.relu(out)\r\n\r\n return out\r\n\r\nclass Bottleneck(nn.Module):\r\n\r\n expansion = 4\r\n\r\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\r\n base_width=64, dilation=1, norm_layer=None):\r\n super(Bottleneck, self).__init__()\r\n if norm_layer is None:\r\n norm_layer = nn.BatchNorm2d\r\n width = int(planes * (base_width / 64.)) * groups\r\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\r\n self.conv1 = conv1x1(inplanes, width)\r\n self.bn1 = norm_layer(width)\r\n self.conv2 = conv3x3(width, width, stride, groups, dilation)\r\n self.bn2 = norm_layer(width)\r\n self.conv3 = conv1x1(width, planes * self.expansion)\r\n self.bn3 = norm_layer(planes * 
self.expansion)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.downsample = downsample\r\n self.stride = stride\r\n\r\n def forward(self, x):\r\n identity = x\r\n\r\n out = self.conv1(x)\r\n out = self.bn1(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv2(out)\r\n out = self.bn2(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv3(out)\r\n out = self.bn3(out)\r\n\r\n if self.downsample is not None:\r\n identity = self.downsample(x)\r\n\r\n out += identity\r\n out = self.relu(out)\r\n\r\n return out\r\n\r\nclass HighResolutionModule(nn.Module):\r\n def __init__(self, num_branches, blocks, num_blocks, num_inchannels,\r\n num_channels, fuse_method, multi_scale_output=True, norm_layer=None):\r\n super(HighResolutionModule, self).__init__()\r\n self._check_branches(\r\n num_branches, blocks, num_blocks, num_inchannels, num_channels)\r\n\r\n if norm_layer is None:\r\n norm_layer = nn.BatchNorm2d\r\n self.norm_layer = norm_layer\r\n\r\n self.num_inchannels = num_inchannels\r\n self.fuse_method = fuse_method\r\n self.num_branches = num_branches\r\n\r\n self.multi_scale_output = multi_scale_output\r\n\r\n self.branches = self._make_branches(\r\n num_branches, blocks, num_blocks, num_channels)\r\n self.fuse_layers = self._make_fuse_layers()\r\n self.relu = nn.ReLU(inplace=True)\r\n\r\n def _check_branches(self, num_branches, blocks, num_blocks,\r\n num_inchannels, num_channels):\r\n if num_branches != len(num_blocks):\r\n error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(\r\n num_branches, len(num_blocks))\r\n logger.error(error_msg)\r\n raise ValueError(error_msg)\r\n\r\n if num_branches != len(num_channels):\r\n error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(\r\n num_branches, len(num_channels))\r\n logger.error(error_msg)\r\n raise ValueError(error_msg)\r\n\r\n if num_branches != len(num_inchannels):\r\n error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(\r\n num_branches, len(num_inchannels))\r\n logger.error(error_msg)\r\n raise ValueError(error_msg)\r\n\r\n def _make_one_branch(self, branch_index, block, num_blocks, num_channels,\r\n stride=1):\r\n downsample = None\r\n if stride != 1 or \\\r\n self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:\r\n downsample = nn.Sequential(\r\n nn.Conv2d(self.num_inchannels[branch_index],\r\n num_channels[branch_index] * block.expansion,\r\n kernel_size=1, stride=stride, bias=False),\r\n self.norm_layer(num_channels[branch_index] * block.expansion),\r\n )\r\n\r\n layers = []\r\n layers.append(block(self.num_inchannels[branch_index],\r\n num_channels[branch_index], stride, downsample, norm_layer=self.norm_layer))\r\n self.num_inchannels[branch_index] = \\\r\n num_channels[branch_index] * block.expansion\r\n for i in range(1, num_blocks[branch_index]):\r\n layers.append(block(self.num_inchannels[branch_index],\r\n num_channels[branch_index], norm_layer=self.norm_layer))\r\n\r\n return nn.Sequential(*layers)\r\n\r\n def _make_branches(self, num_branches, block, num_blocks, num_channels):\r\n branches = []\r\n\r\n for i in range(num_branches):\r\n branches.append(\r\n self._make_one_branch(i, block, num_blocks, num_channels))\r\n\r\n return nn.ModuleList(branches)\r\n\r\n def _make_fuse_layers(self):\r\n if self.num_branches == 1:\r\n return None\r\n\r\n num_branches = self.num_branches\r\n num_inchannels = self.num_inchannels\r\n fuse_layers = []\r\n for i in range(num_branches if self.multi_scale_output else 1):\r\n fuse_layer = []\r\n for j in range(num_branches):\r\n if j > i:\r\n 
fuse_layer.append(nn.Sequential(\r\n nn.Conv2d(num_inchannels[j],\r\n num_inchannels[i],\r\n 1,\r\n 1,\r\n 0,\r\n bias=False),\r\n self.norm_layer(num_inchannels[i])))\r\n elif j == i:\r\n fuse_layer.append(None)\r\n else:\r\n conv3x3s = []\r\n for k in range(i-j):\r\n if k == i - j - 1:\r\n num_outchannels_conv3x3 = num_inchannels[i]\r\n conv3x3s.append(nn.Sequential(\r\n nn.Conv2d(num_inchannels[j],\r\n num_outchannels_conv3x3,\r\n 3, 2, 1, bias=False),\r\n self.norm_layer(num_outchannels_conv3x3)))\r\n else:\r\n num_outchannels_conv3x3 = num_inchannels[j]\r\n conv3x3s.append(nn.Sequential(\r\n nn.Conv2d(num_inchannels[j],\r\n num_outchannels_conv3x3,\r\n 3, 2, 1, bias=False),\r\n self.norm_layer(num_outchannels_conv3x3),\r\n nn.ReLU(inplace=True)))\r\n fuse_layer.append(nn.Sequential(*conv3x3s))\r\n fuse_layers.append(nn.ModuleList(fuse_layer))\r\n\r\n return nn.ModuleList(fuse_layers)\r\n\r\n def get_num_inchannels(self):\r\n return self.num_inchannels\r\n\r\n def forward(self, x):\r\n if self.num_branches == 1:\r\n return [self.branches[0](x[0])]\r\n\r\n for i in range(self.num_branches):\r\n x[i] = self.branches[i](x[i])\r\n\r\n x_fuse = []\r\n for i in range(len(self.fuse_layers)):\r\n y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])\r\n for j in range(1, self.num_branches):\r\n if i == j:\r\n y = y + x[j]\r\n elif j > i:\r\n width_output = x[i].shape[-1]\r\n height_output = x[i].shape[-2]\r\n y = y + F.interpolate(\r\n self.fuse_layers[i][j](x[j]),\r\n size=[height_output, width_output],\r\n mode='bilinear',\r\n align_corners=True\r\n )\r\n else:\r\n y = y + self.fuse_layers[i][j](x[j])\r\n x_fuse.append(self.relu(y))\r\n\r\n return x_fuse\r\n\r\nblocks_dict = {\r\n 'BASIC': BasicBlock,\r\n 'BOTTLENECK': Bottleneck\r\n}\r\n\r\ncfg={'STAGE1':{'NUM_MODULES':1,'NUM_BRANCHES':1,'BLOCK':'BOTTLENECK','NUM_BLOCKS':[4],'NUM_CHANNELS':[32],'FUSE_METHOD':'SUM'},\r\n 'STAGE2':{'NUM_MODULES':1,'NUM_BRANCHES':2,'BLOCK':'BASIC','NUM_BLOCKS':[4,4],'NUM_CHANNELS':[8,16],'FUSE_METHOD':'SUM'},\r\n 'STAGE3':{'NUM_MODULES':3,'NUM_BRANCHES':3,'BLOCK':'BASIC','NUM_BLOCKS':[4,4,4],'NUM_CHANNELS':[8,16,32],'FUSE_METHOD':'SUM'},\r\n 'STAGE4':{'NUM_MODULES':4,'NUM_BRANCHES':4,'BLOCK':'BASIC','NUM_BLOCKS':[4,4,4,4],'NUM_CHANNELS':[8,16,32,64],'FUSE_METHOD':'SUM'}}\r\n\r\n# 定义生成器\r\nclass GeneratorResNet(nn.Module):\r\n def __init__(self,\r\n cfg,\r\n norm_layer=None):\r\n super(GeneratorResNet, self).__init__()\r\n\r\n if norm_layer is None:\r\n norm_layer = nn.BatchNorm2d\r\n self.norm_layer = norm_layer\r\n # stem network\r\n # stem net\r\n # self.conv1 = nn.Conv2d(3, 64, kernel_size=1, stride=1, padding=0,\r\n # bias=False)\r\n self.convtran2d = nn.ConvTranspose2d(1,32,kernel_size=5,stride=2,padding=2,bias=False,output_padding=1)\r\n self.bn1 = self.norm_layer(32)\r\n self.conv2 = nn.Conv2d(32, 32, kernel_size=3, stride=2, padding=2,\r\n bias=False)\r\n self.bn2 = self.norm_layer(32)\r\n self.relu = nn.ReLU(inplace=True)\r\n\r\n # stage 1\r\n self.stage1_cfg = cfg['STAGE1']\r\n num_channels = self.stage1_cfg['NUM_CHANNELS'][0]\r\n block = blocks_dict[self.stage1_cfg['BLOCK']]\r\n num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]\r\n self.layer1 = self._make_layer(block, 32, num_channels, num_blocks)\r\n stage1_out_channel = block.expansion * num_channels\r\n\r\n\r\n # stage 2\r\n self.stage2_cfg = cfg['STAGE2']\r\n num_channels = self.stage2_cfg['NUM_CHANNELS']\r\n block = blocks_dict[self.stage2_cfg['BLOCK']]\r\n num_channels = [\r\n num_channels[i] * block.expansion for i in 
range(len(num_channels))]\r\n self.transition1 = self._make_transition_layer(\r\n [stage1_out_channel], num_channels)\r\n self.stage2, pre_stage_channels = self._make_stage(\r\n self.stage2_cfg, num_channels)\r\n\r\n # stage 3\r\n self.stage3_cfg = cfg['STAGE3']\r\n num_channels = self.stage3_cfg['NUM_CHANNELS']\r\n block = blocks_dict[self.stage3_cfg['BLOCK']]\r\n num_channels = [\r\n num_channels[i] * block.expansion for i in range(len(num_channels))]\r\n self.transition2 = self._make_transition_layer(\r\n pre_stage_channels, num_channels)\r\n self.stage3, pre_stage_channels = self._make_stage(\r\n self.stage3_cfg, num_channels)\r\n\r\n # stage 4\r\n self.stage4_cfg = cfg['STAGE4']\r\n num_channels = self.stage4_cfg['NUM_CHANNELS']\r\n block = blocks_dict[self.stage4_cfg['BLOCK']]\r\n num_channels = [\r\n num_channels[i] * block.expansion for i in range(len(num_channels))]\r\n self.transition3 = self._make_transition_layer(\r\n pre_stage_channels, num_channels)\r\n self.stage4, pre_stage_channels = self._make_stage(\r\n self.stage4_cfg, num_channels, multi_scale_output=True)\r\n\r\n last_inp_channels = int(np.sum(pre_stage_channels))\r\n\r\n self.last_layer = nn.Sequential(\r\n nn.Conv2d(\r\n in_channels=last_inp_channels,\r\n out_channels=last_inp_channels,\r\n kernel_size=1,\r\n stride=1,\r\n padding=0),\r\n self.norm_layer(last_inp_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(\r\n in_channels=last_inp_channels,\r\n out_channels=1,\r\n kernel_size=1,\r\n stride=1,\r\n padding=0)\r\n )\r\n\r\n def _make_transition_layer(\r\n self, num_channels_pre_layer, num_channels_cur_layer):\r\n num_branches_cur = len(num_channels_cur_layer)\r\n num_branches_pre = len(num_channels_pre_layer)\r\n\r\n transition_layers = []\r\n for i in range(num_branches_cur):\r\n if i < num_branches_pre:\r\n if num_channels_cur_layer[i] != num_channels_pre_layer[i]:\r\n transition_layers.append(nn.Sequential(\r\n nn.Conv2d(num_channels_pre_layer[i],\r\n num_channels_cur_layer[i],\r\n 3,\r\n 1,\r\n 1,\r\n bias=False),\r\n self.norm_layer(num_channels_cur_layer[i]),\r\n nn.ReLU(inplace=True)))\r\n else:\r\n transition_layers.append(None)\r\n else:\r\n conv3x3s = []\r\n for j in range(i + 1 - num_branches_pre):\r\n inchannels = num_channels_pre_layer[-1]\r\n outchannels = num_channels_cur_layer[i] \\\r\n if j == i - num_branches_pre else inchannels\r\n conv3x3s.append(nn.Sequential(\r\n nn.Conv2d(\r\n inchannels, outchannels, 3, 2, 1, bias=False),\r\n self.norm_layer(outchannels),\r\n nn.ReLU(inplace=True)))\r\n transition_layers.append(nn.Sequential(*conv3x3s))\r\n\r\n return nn.ModuleList(transition_layers)\r\n\r\n def _make_layer(self, block, inplanes, planes, blocks, stride=1):\r\n downsample = None\r\n if stride != 1 or inplanes != planes * block.expansion:\r\n downsample = nn.Sequential(\r\n nn.Conv2d(inplanes, planes * block.expansion,\r\n kernel_size=1, stride=stride, bias=False),\r\n self.norm_layer(planes * block.expansion),\r\n )\r\n\r\n layers = []\r\n layers.append(block(inplanes, planes, stride, downsample, norm_layer=self.norm_layer))\r\n inplanes = planes * block.expansion\r\n for i in range(1, blocks):\r\n layers.append(block(inplanes, planes, norm_layer=self.norm_layer))\r\n\r\n return nn.Sequential(*layers)\r\n\r\n def _make_stage(self, layer_config, num_inchannels,\r\n multi_scale_output=True):\r\n num_modules = layer_config['NUM_MODULES']\r\n num_branches = layer_config['NUM_BRANCHES']\r\n num_blocks = layer_config['NUM_BLOCKS']\r\n num_channels = layer_config['NUM_CHANNELS']\r\n block = 
blocks_dict[layer_config['BLOCK']]\r\n fuse_method = layer_config['FUSE_METHOD']\r\n\r\n modules = []\r\n for i in range(num_modules):\r\n # multi_scale_output is only used last module\r\n if not multi_scale_output and i == num_modules - 1:\r\n reset_multi_scale_output = False\r\n else:\r\n reset_multi_scale_output = True\r\n\r\n modules.append(\r\n HighResolutionModule(num_branches,\r\n block,\r\n num_blocks,\r\n num_inchannels,\r\n num_channels,\r\n fuse_method,\r\n reset_multi_scale_output,\r\n norm_layer=self.norm_layer)\r\n )\r\n num_inchannels = modules[-1].get_num_inchannels()\r\n\r\n return nn.Sequential(*modules), num_inchannels\r\n\r\n def forward(self, x):\r\n # print('xc1',x.shape)\r\n # x = self.conv1(x)\r\n x = self.convtran2d(x)\r\n # print('xc2', x.shape)\r\n x = self.bn1(x)\r\n # print('xc2', x.shape)\r\n x = self.relu(x)\r\n # print('xc2',x.shape)\r\n #\r\n # x = self.conv2(x)\r\n # x = self.bn2(x)\r\n # x = self.relu(x)\r\n x = self.layer1(x)\r\n # print('xl1',x.shape)\r\n\r\n x_list = []\r\n for i in range(self.stage2_cfg['NUM_BRANCHES']):\r\n if self.transition1[i] is not None:\r\n x_list.append(self.transition1[i](x))\r\n else:\r\n x_list.append(x)\r\n print('stage2',x.shape)\r\n y_list = self.stage2(x_list)\r\n\r\n x_list = []\r\n for i in range(self.stage3_cfg['NUM_BRANCHES']):\r\n if self.transition2[i] is not None:\r\n if i < self.stage2_cfg['NUM_BRANCHES']:\r\n x_list.append(self.transition2[i](y_list[i]))\r\n else:\r\n x_list.append(self.transition2[i](y_list[-1]))\r\n else:\r\n x_list.append(y_list[i])\r\n\r\n\r\n y_list = self.stage3(x_list)\r\n\r\n x_list = []\r\n for i in range(self.stage4_cfg['NUM_BRANCHES']):\r\n if self.transition3[i] is not None:\r\n if i < self.stage3_cfg['NUM_BRANCHES']:\r\n x_list.append(self.transition3[i](y_list[i]))\r\n else:\r\n x_list.append(self.transition3[i](y_list[-1]))\r\n else:\r\n x_list.append(y_list[i])\r\n x = self.stage4(x_list)\r\n\r\n # Upsampling\r\n x0_h, x0_w = x[0].size(2), x[0].size(3)\r\n x1 = F.interpolate(x[1], size=(x0_h, x0_w), mode='bilinear', align_corners=True)\r\n x2 = F.interpolate(x[2], size=(x0_h, x0_w), mode='bilinear', align_corners=True)\r\n x3 = F.interpolate(x[3], size=(x0_h, x0_w), mode='bilinear', align_corners=True)\r\n # print('x[0].shape',x[0].shape)\r\n # print('x1.shape',x1.shape)\r\n # print('x2.shape',x2.shape)\r\n # print('x3.shape',x3.shape)\r\n x = torch.cat([x[0], x1, x2, x3], 1)\r\n x = self.last_layer(x)\r\n # print('x.shape',x.shape)\r\n\r\n return x\r\n\r\nfrom collections import OrderedDict\r\n\r\n# 定义特征提取器\r\nclass FeatureExtractor(nn.Module):\r\n def __init__(self):\r\n super(FeatureExtractor, self).__init__()\r\n vgg19_model = densenet121(pretrained=False)\r\n # vgg19_model.conv0=nn.Conv2d(4, 64, kernel_size=7, stride=2, padding=3, bias=False)\r\n vgg19_model.features = nn.Sequential(OrderedDict([\r\n ('conv0', nn.Conv2d(1, 64, kernel_size=7, stride=2,\r\n padding=3, bias=False)),\r\n ('norm0', nn.BatchNorm2d(64)),\r\n ('relu0', nn.ReLU(inplace=True)),\r\n ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),\r\n ]))\r\n # 取了VGG19的前18层,见注释8\r\n self.feature_extractor = nn.Sequential(*list(vgg19_model.features.children())[:120])\r\n\r\n def forward(self, img):\r\n return self.feature_extractor(img)\r\n\r\ngenerator = GeneratorResNet(cfg)\r\n# discriminator = Discriminator(input_shape=(opt.channels, *hr_shape))\r\nfeature_extractor = FeatureExtractor()\r\n\r\n# 将特征提取器设为评估模式\r\nfeature_extractor.eval()\r\n\r\n# 设置损失函数,MSELoss和L1Loss\r\ncriterion_GAN = 
torch.nn.MSELoss()\r\ncriterion_content = torch.nn.L1Loss()\r\n\r\ngenerator.load_state_dict(torch.load(\"saved_models_hrnet/generator_170.pth\"))\r\n # discriminator.load_state_dict(torch.load(\"saved_models/discriminator_%d.pth\"))\r\ngenerator.eval()\r\n\r\n# 导入数据集\r\nimport random\r\nimport os\r\nimport numpy as np\r\n\r\nimport torch\r\nfrom torch.utils.data import Dataset\r\nfrom PIL import Image\r\nimport torchvision.transforms as transforms\r\n\r\n# 设定预训练PyTorch模型的归一化参数\r\n# mean = np.array([0.485, 0.456, 0.406])\r\n# std = np.array([0.229, 0.224, 0.225])\r\n\r\n\r\nclass ImageDataset(Dataset):\r\n def __init__(self, hr_shape):\r\n hr_height, hr_width = hr_shape # hr_shape=(128, 128)\r\n # 通过源图像分别生成低、高分辨率图像,4倍\r\n self.lr_transform = transforms.Compose( # 见注释8\r\n [\r\n transforms.Resize((hr_height // 2, hr_height // 2), Image.BICUBIC),\r\n transforms.ToTensor(),\r\n # transforms.Normalize(mean, std),\r\n ]\r\n )\r\n self.hr_transform = transforms.Compose(\r\n [\r\n # transforms.Resize((hr_height, hr_height), Image.BICUBIC),\r\n transforms.ToTensor(),\r\n # transforms.Normalize(mean, std),\r\n ]\r\n )\r\n # 将文件夹中的图片进行按文件名升序排列,从000001.jpg到202599.jpg\r\n self.filesH = sorted(os.listdir('/data/DeepRockSR-2D_copy/DeepRockSR-2D/shuffled2D/shuffled2D_valid_HR'))\r\n self.filesL = sorted(os.listdir('/data/DeepRockSR-2D_copy/DeepRockSR-2D/shuffled2D/shuffled2D_valid_HRB_G_BB9'))\r\n\r\n def __getitem__(self, index): # 定义时未调用,每次读取图像时调用,见注释9\r\n imgl = Image.open('/data/DeepRockSR-2D_copy/DeepRockSR-2D/shuffled2D/shuffled2D_valid_HRB_G_BB9/' + self.filesL[\r\n index % len(self.filesL)]).convert('L')\r\n imgh = Image.open('/data/DeepRockSR-2D_copy/DeepRockSR-2D/shuffled2D/shuffled2D_valid_HR/' + self.filesH[\r\n index % len(self.filesH)]).convert('L')\r\n # im2 = imgl\r\n # im2.save(str(index) + \"-.png\")\r\n\r\n img_lr = self.lr_transform(imgl)\r\n img_hr = self.hr_transform(imgh)\r\n\r\n\r\n return {\"lr\": img_lr,\"hr\": img_hr}\r\n\r\n # 定义dataloader和每次读取图像时均调用\r\n def __len__(self):\r\n return len(self.filesH)\r\n\r\n# 用定义好的方法来读取数据集\r\ndataloader = DataLoader(\r\n ImageDataset(hr_shape=hr_shape),\r\n batch_size=opt.batch_size, # batch_size = 4\r\n shuffle=False,\r\n num_workers=opt.n_cpu, # num_workers = 8\r\n)\r\nprint('数据集准备完毕')\r\n\r\n# 定义特征提取器\r\n\r\n\r\nfeature_extractor = FeatureExtractor()\r\nfeature_extractor.eval()\r\n\r\n#定义Tensor类型\r\n# Tensor = torch.cuda.FloatTensor if cuda else torch.Tensor\r\nTensor = torch.FloatTensor\r\n\r\nfrom PIL import Image\r\n# import numpy\r\n# print('start training')\r\n\r\nfrom PIL import ImageFile\r\nImageFile.LOAD_TRUNCATED_IMAGES = True\r\nimport cv2\r\nfrom skimage.metrics import peak_signal_noise_ratio\r\nfrom skimage.measure import compare_ssim\r\n\r\n\r\nmselist = [0 for _ in range(len(dataloader))]\r\nPSNRlist =[]\r\nSSIMlist = []\r\n\r\ntry:\r\n os.mkdir(str(opt.gennum)+'imageval_hrnet/')\r\n os.mkdir('result')\r\nexcept:\r\n pass\r\nfor i,imgs in enumerate(dataloader):\r\n # print(i)\r\n\r\n imgs_lr = Variable(imgs[\"lr\"].type(Tensor)) # torch.Size([4,3,500,500])\r\n imgs_hr = Variable(imgs[\"hr\"].type(Tensor))\r\n gen_hr = generator(imgs_lr)\r\n gn = gen_hr.detach().numpy()\r\n ir = imgs_lr.detach().numpy()\r\n hr = imgs_hr.detach().numpy()\r\n\r\n # print('gn.shape',gn.shape)\r\n\r\n gen_features = feature_extractor(gen_hr)\r\n real_features = feature_extractor(imgs_hr)\r\n print('gen_features.shape', gen_features.shape)\r\n print('real_features.shape', real_features.shape)\r\n loss_content = criterion_content(gen_features, 
real_features.detach())\r\n print('loss_content',loss_content)\r\n mselist[i]=loss_content.item()\r\n print('loss_content',mselist[i])\r\n print('gnlen',len(gn))\r\n print('gnshape',gn.shape)\r\n for k in range(len(gn)):\r\n gnn = gn[k]\r\n # gnn = gnn.astype(np.uint8)\r\n # gnn = gnn.reshape(500,500,3)\r\n\r\n # gnn=np.array()\r\n # gnn = gnn.transpose(1,0)\r\n # gnn = np.resize(gnn, (500, 500, 3))\r\n gnn = gnn[0]\r\n # print('gnnshape',gnn.shape)\r\n # gnn = gnn.reshape(gnn, (500, 500))\r\n # print('gnn',gnn.shape)\r\n\r\n # 生成的图像\r\n im = Image.fromarray(np.uint8(gnn))\r\n\r\n im.save(str(opt.gennum)+'imageval_hrnet/'+str(i)+str(k)+\".png\")\r\n\r\n # 四倍下采样和模糊后的图像\r\n irr = ir[k]*255\r\n irr = irr[0]\r\n # irr = irr.transpose(1,0)\r\n # print('irr',irr.shape)\r\n im2 = Image.fromarray(np.uint8(irr))\r\n im2.save(str(opt.gennum)+'imageval_hrnet/' + str(i) + str(k) + \"-.png\")\r\n\r\n # 原图\r\n hrr = hr[k] * 255\r\n hrr=hrr[0]\r\n # hrr = hrr.transpose(1,0)\r\n im3 = Image.fromarray(np.uint8(hrr))\r\n im3.save(str(opt.gennum)+'imageval_hrnet/' + str(i) + str(k) + \"--.png\")\r\n\r\n img1 = cv2.imread(str(opt.gennum)+'imageval_hrnet/'+str(i)+str(k)+\".png\")\r\n\r\n img2 = cv2.imread(str(opt.gennum)+'imageval_hrnet/' + str(i) + str(k) + \"--.png\")\r\n\r\n print('PSNR shape',img1.shape,img2.shape)\r\n PSNR = peak_signal_noise_ratio(img1, img2)\r\n SSIM = compare_ssim(img1, img2, multichannel=True)\r\n\r\n PSNRlist.append(PSNR)\r\n SSIMlist.append(SSIM)\r\n\r\n print(PSNR,SSIM)\r\n\r\nprint(str(sum(mselist)/len(mselist)))\r\nwith open('result/'+str(opt.gennum)+'result_hrnet.txt','a') as f:\r\n f.write('\\nloss:'+str(sum(mselist)/len(mselist)))\r\n f.write('\\nPSNR:'+str(sum(PSNRlist)/len(PSNRlist)))\r\n f.write('\\nSSIM:'+str(sum(SSIMlist)/len(SSIMlist)))\r\n\r\n print(sum(mselist)/len(mselist))","repo_name":"LilyCaiZL/HRGAN","sub_path":"testhrgan.py","file_name":"testhrgan.py","file_ext":"py","file_size_in_byte":29418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34670179268","text":"import uiScriptLocale\n\nTAKE_ROOT = \"d:/ymir work/ui/minigame/rumi/\"\n\nwindow = {\n\t\"name\" : \"MiniGameWindow\",\n\t\n\t\"x\" : SCREEN_WIDTH - 136 - 100,\n\t\"y\" : 15,\n\t\n\t\"width\" : 100,\n\t\"height\" : 58,\n\t\n\t\"children\" :\n\t(\n\t\t{\n\t\t\t\"name\" : \"mini_game_window\",\n\t\t\t\"type\" : \"window\",\n\t\t\t\n\t\t\t\"x\" : 0,\n\t\t\t\"y\" : 0,\n\t\t\t\n\t\t\t\"width\" : 100,\n\t\t\t\"height\" : 58,\n\t\t\t\n\t\t\t\"children\" :\n\t\t\t(\n\t\t\t\t{\n\t\t\t\t\t\"name\" : \"minigame_rumi_button\",\n\t\t\t\t\t\"type\" : \"button\",\n\t\t\t\t\t\n\t\t\t\t\t\"x\" : 0,\n\t\t\t\t\t\"y\" : 0,\n\n\n\t\t\t\t\t\"tooltip_text\" : uiScriptLocale.MINI_GAME_RUMI_TITLE,\n\t\t\t\t\t\"tooltip_x\" : -2,\n\t\t\t\t\t\"tooltip_y\" : 55,\n\t\t\t\t\t\n\t\t\t\t\t\"default_image\" : TAKE_ROOT + \"rumi_button_min.sub\",\n\t\t\t\t\t\"over_image\" : TAKE_ROOT + \"rumi_button_min.sub\",\n\t\t\t\t\t\"down_image\" : TAKE_ROOT + \"rumi_button_min.sub\",\n\t\t\t\t},\n\t\t\t),\n\t\t},\t\t\n\t),\t\n}\n","repo_name":"Sophie-Williams/metin2_okey_cards","sub_path":"EPK_CLIENT/root/uiscript/minigamewindow.py","file_name":"minigamewindow.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"39131517284","text":"#!/usr/bin/env python3\n\n__day__ = 15\n\n__year__ = 2022\n\n__motd__ = '--- Year %s -- Day %s ---' % (__year__, __day__)\n\n__url__ = 
'http://adventofcode.com/%s/day/%s' % (__year__, __day__)\n\nimport re\n\nverbose = 1\n\n\nclass BeaconExclusionZone:\n\n def __init__(self):\n pass\n\n def print(self, txt='', map=None):\n \"\"\" visualize map \"\"\"\n if not verbose: return\n x = [ xy[0] for xy in map.keys() ] + [ xy[0] for xy in map.values() ]\n y = [ xy[1] for xy in map.keys() ] + [ xy[1] for xy in map.values() ]\n minx, maxx = min(x), max(x)\n miny, maxy = min(y), max(y)\n #\n if txt: print(txt)\n print('x', '\\t', ''.join(['%1s' % int(col / 10) for col in range(minx, maxx+1)]))\n print('i', '\\t', ''.join(['%1s' % (col % 10) for col in range(minx, maxx+1)]))\n for row in range(miny, maxy+1):\n r = [ 'S' if map.get((col,row)) else '.' for col in range(minx, maxx+1) ]\n print(row, '\\t', ''.join(r))\n print()\n\n def print2(self, txt='', map=None):\n \"\"\" visualize map \"\"\"\n if not verbose: return\n x = [ xy[0] for xy in map.keys() ]\n y = [ xy[1] for xy in map.keys() ]\n minx, maxx = min(x), max(x)\n miny, maxy = min(y), max(y)\n #\n if txt: print(txt)\n print('x', '\\t', ''.join(['%1s' % int(col / 10) for col in range(minx, maxx+1)]))\n print('i', '\\t', ''.join(['%1s' % (col % 10) for col in range(minx, maxx+1)]))\n for row in range(miny, maxy+1):\n r = [ map.get((col,row), '.') for col in range(minx, maxx+1) ]\n print(row, '\\t', ''.join(r))\n print()\n\n def coverage(self):\n \"\"\" fill coverage to the map \"\"\"\n covermap = {}\n for sensorxy, beaconxy in self.sbmap.items():\n if verbose: print('sensor', sensorxy, 'beacon', beaconxy, 'size of coverage map is', len(covermap))\n d = self.manhattan_dist(sensorxy, beaconxy)\n covermap.setdefault(sensorxy, 'S')\n covermap.setdefault(beaconxy, 'B')\n for xy in self.manhattan_square(sensorxy, d):\n covermap.setdefault(xy, '#')\n return covermap\n\n def manhattan_dist(self, sensorxy, beaconxy):\n \"\"\" calc hamming distance between sensor (xy0 to the closest beacon (xy) \"\"\"\n dx, dy = beaconxy[0] - sensorxy[0], beaconxy[1] - sensorxy[1]\n return abs(dx) + abs(dy)\n\n def manhattan_square(self, centerxy, d: int):\n \"\"\" generate all xy within manhattan distance d from center xy \"\"\"\n for r in range(1, d+1):\n for x in range(-r, r+1):\n for y in range(-r, r+1):\n xy = centerxy[0]+x, centerxy[1]+y\n if self.manhattan_dist(centerxy, xy) > d: continue\n yield xy\n\n def parse(self, input: list):\n \"\"\" parse string list input \"\"\"\n # sensor -> beacon\n sbmap = {}\n for idx, line in enumerate(input):\n sensorxy, beaconxy = self.parse_line(line)\n if sensorxy is None or beaconxy is None:\n print('ERR parsing line:', idx+1, ':', line)\n continue\n sbmap[sensorxy] = beaconxy\n return sbmap\n\n def parse_line(self, line: str):\n \"\"\" parse: Sensor at x=2, y=18: closest beacon is at x=-2, y=15 \"\"\"\n m = re.match('Sensor at x=(?P-?\\d+), y=(?P-?\\d+): closest beacon is at x=(?P-?\\d+), y=(?P-?\\d+)', line)\n if not m: return None, None\n sensorxy = (int(m['sx']), int(m['sy']))\n beaconxy = (int(m['bx']), int(m['by']))\n return sensorxy, beaconxy\n\n def count_nosignal_at_row(self, cmap, row: int):\n \"\"\" \"\"\"\n r = [ cmap.get(xy) for xy in cmap.keys() if xy[1] == row ]\n return r.count('#')\n\n def task_a(self, input: list, result):\n \"\"\" task A \"\"\"\n # sensor -> beacon\n self.sbmap = self.parse(input)\n #self.print('sensors', self.sbmap)\n cmap = self.coverage()\n #self.print2('coverage', cmap)\n return self.count_nosignal_at_row(cmap, result[0])\n\n def task_b(self, input: list):\n \"\"\" task B \"\"\"\n return None\n\n\ndef testcase_a(sut, 
input, result, trim=str.rstrip):\n \"\"\" testcase verifies if input returns result \"\"\"\n # read default input file\n if input is None:\n data = __file__.replace('.py', '.input')\n with open(data) as f:\n input = [ trim(line) for line in f ]\n # file is single line only\n if len(input) == 1:\n input = input[0]\n #\n print(\"TestCase A using input:\", data if 'data' in vars() else input)\n # read multiline string as input\n if input.count('\\n') > 2:\n input = [ trim(line) for line in input.splitlines() ]\n # optional delete the first empty line\n if len(input[0]) == 0:\n input = input[1:]\n #\n print(\"\\t expected result:\", result)\n r = sut.task_a(input, result)\n print('\\t got:',r,'\\t','[ OK ]' if r == result[1] else '[ ERR ]')\n print()\n\ndef testcase_b(sut, input, result, trim=str.rstrip):\n \"\"\" testcase verifies if input returns result \"\"\"\n # read default input file\n if input is None:\n data = __file__.replace('.py', '.input')\n with open(data) as f:\n input = [ trim(line) for line in f ]\n # file is single line only\n if len(input) == 1:\n input = input[0]\n #\n print(\"TestCase B using input:\", data if 'data' in vars() else input)\n # read multiline string as input\n if input.count('\\n') > 2:\n input = [ trim(line) for line in input.splitlines() ]\n # optional delete the first empty line\n if len(input[0]) == 0:\n input = input[1:]\n #\n print(\"\\t expected result:\", result)\n r = sut.task_b(input)\n print('\\t got:',r,'\\t','[ OK ]' if r == result else '[ ERR ]')\n print()\n\n\n# ======\n# MAIN\n# ======\n\nprint()\nprint(__motd__, __url__)\nprint()\n\ntestdata = \"\"\"\nSensor at x=2, y=18: closest beacon is at x=-2, y=15\nSensor at x=9, y=16: closest beacon is at x=10, y=16\nSensor at x=13, y=2: closest beacon is at x=15, y=3\nSensor at x=12, y=14: closest beacon is at x=10, y=16\nSensor at x=10, y=20: closest beacon is at x=10, y=16\nSensor at x=14, y=17: closest beacon is at x=10, y=16\nSensor at x=8, y=7: closest beacon is at x=2, y=10\nSensor at x=2, y=0: closest beacon is at x=2, y=10\nSensor at x=0, y=11: closest beacon is at x=2, y=10\nSensor at x=20, y=14: closest beacon is at x=25, y=17\nSensor at x=17, y=20: closest beacon is at x=21, y=22\nSensor at x=16, y=7: closest beacon is at x=15, y=3\nSensor at x=14, y=3: closest beacon is at x=15, y=3\nSensor at x=20, y=1: closest beacon is at x=15, y=3\n\"\"\"\n\n# ========\n# Task A\n# ========\n\n# test cases\ntestcase_a(BeaconExclusionZone(), testdata, (10, 26))\n\n# ?\ntestcase_a(BeaconExclusionZone(), None, (2000000, 1))\n\n\n","repo_name":"blue-sky-r/Advent-Of-Code","sub_path":"2022/15/u15.py","file_name":"u15.py","file_ext":"py","file_size_in_byte":6911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26676430004","text":"## Built-in modules\nimport os\nimport tkinter as tk\nimport typing\n\nfrom tkinter import scrolledtext\n\n## Custom modules\nfrom Models.Kaiseki import Kaiseki\nfrom Models.Kijiku import Kijiku\nfrom Modules.toolkit import check_update\nfrom Kudasai import Kudasai\n\n##-------------------start-of-KudasaiGUI---------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\nclass KudasaiGUI:\n\n \"\"\"\n\n The gui for Kudasai\\n\n\n 
\"\"\"\n\n##-------------------start-of-__init__()---------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n def __init__(self) -> None:\n\n \"\"\"\n\n Initializes the KudasaiGUI class\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) : The KudasaiGUI object\\n\n\n Returns:\\n\n None\\n\n\n \"\"\"\n\n os.system(\"title \" + \"Kudasai Console\")\n\n self.text_color = \"#FFFFFF\"\n self.primary_color = \"#000000\"\n self.secondary_color = \"#202020\"\n\n self.main_window = tk.Tk()\n self.main_window.title(\"Kudasai GUI\")\n self.main_window.configure(bg=\"#202020\")\n self.main_window.geometry(\"1200x600\")\n self.main_window.resizable(False, False) ## Prevents resizing of window\n\n self.main_window.protocol(\"WM_DELETE_WINDOW\", self.on_main_window_close) ## causes all windows to close when the main window is closed\n\n if(os.name == 'nt'): ## Windows\n self.config_dir = os.path.join(os.environ['USERPROFILE'],\"KudasaiConfig\")\n else: ## Linux\n self.config_dir = os.path.join(os.path.expanduser(\"~\"), \"KudasaiConfig\")\n\n self.script_dir = os.path.dirname(os.path.abspath(__file__))\n\n self.output_dir = os.path.join(self.script_dir, \"KudasaiOutput\")\n\n self.replacement_json_files = []\n self.replacement_json_paths = []\n\n self.translation_model_files = []\n self.translation_model_paths = []\n\n self.replacement_json_files, self.replacement_json_paths = self.get_json_options() \n self.translation_model_files, self.translation_model_paths = self.get_translation_mode_options() \n\n self.unpreprocessed_text_path = os.path.join(self.config_dir,\"unpreprocessed_text.txt\")\n self.gui_temp_translation_log_path = os.path.join(self.config_dir,\"guiTempTranslationLog.txt\")\n\n self.gui_temp_kaiseki_path = os.path.join(self.config_dir,\"guiTempKaiseki.txt\")\n self.gui_temp_kijiku_path = os.path.join(self.config_dir,\"guiTempKijiku.txt\")\n self.is_there_update_path = os.path.join(self.config_dir, \"isThereUpdate.txt\")\n\n self.kudasai_results_path = os.path.join(os.path.join(self.script_dir,\"KudasaiOutput\"), \"output.txt\")\n self.translated_text_path = os.path.join(os.path.join(self.script_dir,\"KudasaiOutput\"), \"translatedText.txt\")\n self.preprocessed_text_path = os.path.join(os.path.join(self.script_dir,\"KudasaiOutput\"), \"preprocessedText.txt\")\n\n ## preprocessing and translating clients are set to None before they are initialized\n self.kudasai_client = None\n self.kaiseki_client = None\n self.kijiku_client = None\n\n if(not os.path.exists(self.output_dir)):\n os.mkdir(self.output_dir)\n\n check_update(True)\n\n with open(self.is_there_update_path, 'r', encoding='utf-8') as file:\n if(file.read() == \"true\"):\n self.create_update_alert_window()\n\n self.create_secondary_window()\n\n self.create_text_entry()\n\n self.create_frames()\n\n self.create_preprocess_button()\n self.create_json_option_menu()\n\n self.create_copy_button()\n self.create_translate_button()\n\n self.create_translation_mode_menu()\n\n self.create_main_output_label()\n\n##-------------------start-of-reset_preprocessing_files()---------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n def reset_preprocessing_files(self) -> None:\n\n \"\"\"\n\n Resets the preprocessing files\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) : The KudasaiGUI object\\n\n\n Returns:\\n\n 
None\\n\n\n \"\"\"\n\n files_to_truncate = [\n self.kudasai_results_path,\n self.preprocessed_text_path\n ]\n\n for file_path in files_to_truncate: ## Creates files if they don't exist, truncates them if they do\n with open(file_path, 'w+') as file:\n file.truncate(0)\n\n##-------------------start-of-reset_preprocessing_files()---------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n def reset_translation_files(self) -> None:\n\n \"\"\"\n\n Resets the translation files\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) : The KudasaiGUI object\\n\n\n Returns:\\n\n None\\n\n\n \"\"\"\n\n files_to_truncate = [\n self.translated_text_path,\n ]\n\n for file_path in files_to_truncate: ## Creates files if they don't exist, truncates them if they do\n with open(file_path, 'w+') as file:\n file.truncate(0)\n\n##-------------------start-of-reset_preprocessing_files()---------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n def reset_temp_files(self) -> None:\n\n \"\"\"\n\n Resets the standard temp files\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) : The KudasaiGUI object\\n\n\n Returns:\\n\n None\\n\n\n \"\"\"\n\n files_to_truncate = [\n self.unpreprocessed_text_path,\n self.gui_temp_translation_log_path,\n self.gui_temp_kaiseki_path,\n self.gui_temp_kijiku_path,\n ]\n\n for file_path in files_to_truncate: ## Creates files if they don't exist, truncates them if they do\n with open(file_path, 'w+') as file:\n file.truncate(0)\n\n##-------------------start-of-create_secondary_window()-----------------------------------------------------------------------------------------------------------------------------------------------------\n\n def create_secondary_window(self) -> None:\n\n \"\"\"\n\n Creates the secondary window\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) : The KudasaiGUI object\\n\n\n Returns:\\n\n None\\n\n\n \"\"\"\n\n self.secondary_window = tk.Tk()\n self.secondary_window.title(\"Results\")\n self.secondary_window.configure(bg=\"#202020\")\n self.secondary_window.geometry(\"300x600\")\n self.secondary_window.resizable(False, False) ## Prevents resizing of window\n\n self.secondary_window.withdraw() ## Hides the window\n\n self.create_results_output_label()\n\n##-------------------start-of-create_update_alert_window()()-----------------------------------------------------------------------------------------------------------------------------------------------------\n\n def create_update_alert_window(self) -> None:\n\n \"\"\"\n\n Creates the update alert window\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) : The KudasaiGUI object\\n\n\n Returns:\\n\n None\\n\n\n \"\"\"\n\n self.update_alert_window = tk.Tk()\n self.update_alert_window.title(\"Please Update Kudasai\")\n self.update_alert_window.configure(bg=\"#202020\")\n self.update_alert_window.geometry(\"300x600\")\n self.update_alert_window.resizable(False, False) ## Prevents resizing of window\n\n patch_notes_path = os.path.join(self.config_dir, \"patchNotes.txt\")\n\n with open(patch_notes_path, 'r', encoding='utf-8') as file:\n release_notes = file.read()\n\n self.create_update_alert_output_label()\n\n self.update_alert_output_label.insert(tk.END, \"There is a new update for Kudasai\\nIt is recommended that you use the latest version of Kudasai\\nYou can download it at 
https://github.com/Seinuve/Kudasai/releases/latest \\n\" + release_notes) ## Display the update text\n\n self.update_alert_window.lift()\n\n##-------------------start-of-create_text_entry()-----------------------------------------------------------------------------------------------------------------------------------------------------------\n\n def create_text_entry(self) -> None:\n\n \"\"\"\n\n Creates the text entry box\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) : The KudasaiGUI object\\n\n\n Returns:\\n\n None\\n\n\n \"\"\"\n\n self.text_entry = tk.Text(self.main_window, bg=self.primary_color, fg=self.text_color, height=18, width=600)\n self.text_entry.pack(side=tk.TOP)\n\n##-------------------start-of-create_main_output_label()----------------------------------------------------------------------------------------------------------------------------------------------------------------\n \n def create_main_output_label(self) -> None:\n\n \"\"\"\n\n Creates the output label\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) : The KudasaiGUI object\\n\n\n Returns:\\n\n None\\n\n\n \"\"\"\n\n self.main_output_label = scrolledtext.ScrolledText(self.main_window, bg=self.primary_color, fg=self.text_color, height=18, width=600)\n self.main_output_label.pack(side=tk.BOTTOM)\n\n##-------------------start-of-create_results_output_label()-------------------------------------------------------------------------------------------------------------------------------------------\n\n def create_results_output_label(self) -> None:\n\n \"\"\"\n\n Creates the output label\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) : The KudasaiGUI object\\n\n\n Returns:\\n\n None\\n\n\n \"\"\"\n\n self.secondary_output_label = scrolledtext.ScrolledText(self.secondary_window, bg=self.primary_color, fg=self.text_color, height=39, width=300)\n self.secondary_output_label.pack(side=tk.BOTTOM)\n\n##-------------------start-of-create_update_alert_output_label()-------------------------------------------------------------------------------------------------------------------------------------------\n\n def create_update_alert_output_label(self) -> None:\n\n self.update_alert_output_label = scrolledtext.ScrolledText(self.update_alert_window, bg=self.primary_color, fg=self.text_color, height=39, width=300)\n self.update_alert_output_label.pack(side=tk.BOTTOM)\n\n##-------------------start-of-create_frames()---------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n def create_frames(self) -> None:\n\n \"\"\"\n\n Creates the frames\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) : The KudasaiGUI object\\n\n\n Returns:\\n\n None\\n\n\n \"\"\"\n\n self.top_button_frame = tk.Frame(self.main_window, bg=self.primary_color) ## Frame for the top buttons\n self.top_button_frame.pack()\n\n self.middle_button_frame = tk.Frame(self.main_window, bg=self.primary_color) ## Frame for the middle buttons\n self.middle_button_frame.pack()\n\n self.bottom_button_frame = tk.Frame(self.main_window, bg=self.primary_color) ## Frame for the bottom buttons\n self.bottom_button_frame.pack()\n\n##-------------------start-of-create_preprocess_button()---------------------------------------------------------------------------------------------------------------------------------------------------\n\n def create_preprocess_button(self) -> None:\n\n \"\"\"\n\n Creates the preprocess button\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) 
: The KudasaiGUI object\\n\n\n Returns:\\n\n None\\n\n\n \"\"\"\n\n preprocess_button = tk.Button(self.top_button_frame, text=\"Preprocess\", bg=self.primary_color, fg=self.text_color, command=self.preprocess)\n preprocess_button.pack(side=tk.LEFT)\n\n##-------------------start-of-create_translate_button()-----------------------------------------------------------------------------------------------------------------------------------------------------\n\n def create_translate_button(self) -> None:\n\n \"\"\"\n\n Creates the translate button\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) : The KudasaiGUI object\\n\n\n Returns:\\n\n None\\n\n\n \"\"\"\n\n translate_button = tk.Button(self.bottom_button_frame, text=\"Translate\", bg=self.primary_color, fg=self.text_color, command=self.translate)\n translate_button.pack(side=tk.LEFT)\n\n##-------------------start-of-create_copy_button()--------------------------------------------------------------------------------------------------------------------------------------------------------\n\n def create_copy_button(self) -> None:\n\n \"\"\"\n\n Creates the copy button\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) : The KudasaiGUI object\\n\n\n Returns:\\n\n None\\n\n\n \"\"\"\n\n copy_button = tk.Button(self.middle_button_frame, text=\"Copy Output\", bg=self.primary_color, fg=self.text_color, command=self.copy_output)\n copy_button.pack(side=tk.RIGHT)\n\n##-------------------start-of-create_json_option_menu()---------------------------------------------------------------------------------------------------------------------------------------------------\n\n def create_json_option_menu(self) -> None:\n\n \"\"\"\n\n Creates the json option menu\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) : The KudasaiGUI object\\n\n\n Returns:\\n\n None\\n\n\n \"\"\"\n\n options = self.replacement_json_files\n self.selected_json_option = tk.StringVar() \n self.selected_json_option.set(options[0]) ## Set default value\n\n json_option_menu = tk.OptionMenu(self.top_button_frame, self.selected_json_option, *options)\n json_option_menu.configure(bg=self.primary_color, fg=self.text_color, highlightbackground=self.primary_color, activebackground=self.primary_color, menu=json_option_menu['menu']) # Set colors\n\n menu = json_option_menu['menu'] \n menu.configure(bg=self.primary_color, fg=self.text_color) ## Set colors\n\n json_option_menu.pack(side=tk.LEFT)\n\n##-------------------start-of-create_translation_mode_menu()---------------------------------------------------------------------------------------------------------------------------------------------------\n\n def create_translation_mode_menu(self) -> None:\n\n \"\"\"\n\n Creates the translation mode menu\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) : The KudasaiGUI object\\n\n\n Returns:\\n\n None\\n\n\n \"\"\"\n\n options = self.translation_model_files\n self.selected_translation_mode = tk.StringVar() \n self.selected_translation_mode.set(options[0]) ## Set default value\n\n translation_mode_menu = tk.OptionMenu(self.bottom_button_frame, self.selected_translation_mode, *options)\n translation_mode_menu.configure(bg=self.primary_color, fg=self.text_color, highlightbackground=self.primary_color, activebackground=self.primary_color, menu=translation_mode_menu['menu'])\n\n menu = translation_mode_menu['menu']\n menu.configure(bg=self.primary_color, fg=self.text_color) ## Set colors\n\n 
translation_mode_menu.pack(side=tk.RIGHT)\n\n##-------------------start-of-get_json_options()-----------------------------------------------------------------------------------------------------------------------------------------------------------\n\n def get_json_options(self) -> tuple[typing.List[str], typing.List[str]]:\n\n \"\"\"\n\n Gets the json options from the Replacements folder\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) : The KudasaiGUI object\\n\n\n Returns:\\n\n replacement_json_files (list): List of json files\\n\n replacement_json_paths (list): List of json file paths\\n\n\n \"\"\"\n\n json_folder = os.path.join(self.script_dir,"Replacements")\n\n replacement_json_files = []\n replacement_json_paths = []\n\n for file_name in os.listdir(json_folder):\n\n if(file_name.endswith(".json")): ## If the file is a json file, add it to the options\n file_path = os.path.join(json_folder, file_name)\n replacement_json_files.append(file_name)\n replacement_json_paths.append(file_path)\n\n return replacement_json_files, replacement_json_paths\n \n##-------------------start-of-get_translation_mode_options()------------------------------------------------------------------------------------------------------------------------------------------------\n\n def get_translation_mode_options(self) -> tuple[typing.List[str], typing.List[str]]:\n\n \"\"\"\n\n Gets the translation mode options from the Models folder\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) : The KudasaiGUI object\\n\n\n Returns:\\n\n translation_model_files (list): List of model files\\n\n translation_model_paths (list): List of model file paths\\n\n\n \"\"\"\n\n model_folder = os.path.join(self.script_dir,"Models")\n\n translation_model_files = []\n translation_model_paths = []\n\n for file_name in os.listdir(model_folder):\n\n if(file_name.endswith(".py") and not file_name.startswith("__init__") and not file_name.startswith("Kansei")): ## If the file is a model file, add it to the options\n file_path = os.path.join(model_folder, file_name)\n file_name = file_name.replace(".py", "")\n translation_model_files.append(file_name)\n translation_model_paths.append(file_path)\n\n return translation_model_files, translation_model_paths\n \n##-------------------start-of-preprocess()----------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n def preprocess(self) -> None:\n\n \"\"\"\n\n Preprocesses the text in the text entry box\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) : The KudasaiGUI object\\n\n\n Returns:\\n\n None\\n\n\n \"\"\"\n\n self.reset_temp_files() # Reset the files\n self.reset_preprocessing_files() # Reset the preprocessing files\n\n unpreprocessed_text = self.text_entry.get("1.0", tk.END)\n\n with open(self.unpreprocessed_text_path, "w+", encoding="utf-8") as file: ## Write the text to a temporary file for Kudasai to read and preprocess\n file.write(unpreprocessed_text)\n\n if(self.kudasai_client is None): ## If the Kudasai object has not been created yet, create it (this is for the first time the user presses the preprocess button\n self.kudasai_client = Kudasai(from_gui=True) ## creates Kudasai object, passing if it is being run from the GUI or not\n \n self.kudasai_client.preprocess(self.unpreprocessed_text_path, self.replacement_json_paths[self.replacement_json_files.index(self.selected_json_option.get())]) ## Preprocess the text\n\n with open(self.preprocessed_text_path, "r+", encoding="utf-8") as 
file:\n preprocessed_text = file.read() ## Read the preprocessed text\n\n with open(self.kudasai_results_path, \"r+\", encoding=\"utf-8\") as file:\n kudasai_results = file.read() ## Read the results\n\n self.main_output_label.delete(\"1.0\", tk.END)\n self.main_output_label.insert(tk.END, preprocessed_text) ## Display the preprocessed text\n \n try: ## Try to display the results\n self.secondary_output_label.delete(\"1.0\", tk.END)\n self.secondary_output_label.insert(tk.END, kudasai_results)\n\n except: ## if window is destroyed, create a new one\n self.create_secondary_window()\n self.secondary_output_label.insert(tk.END, kudasai_results)\n\n self.secondary_window.deiconify() ## Show the results window\n\n self.secondary_window.mainloop()\n\n##-------------------start-of-translate()-----------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n def translate(self) -> None:\n\n \"\"\"\n\n Translates the text in the text entry box\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) : The KudasaiGUI object\\n\n\n Returns:\\n\n None\\n\n\n \"\"\" \n\n self.reset_temp_files() ## Reset the files\n self.reset_translation_files()\n\n if(self.selected_translation_mode.get() == \"Kijiku\"):\n self.handle_kijiku()\n \n elif(self.selected_translation_mode.get() == \"Kaiseki\"):\n self.handle_kaiseki()\n\n##-------------------start-of-copy_output()-----------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n def copy_output(self) -> None:\n\n \"\"\"\n\n Copies the output to the clipboard\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) : The KudasaiGUI object\\n\n\n Returns:\\n\n None\\n\n\n \"\"\"\n\n self.main_window.clipboard_clear()\n self.main_window.clipboard_append(self.main_output_label.get(\"1.0\", \"end-1c\"))\n\n##-------------------start-of-handle_kijiku()---------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n def handle_kijiku(self) -> None:\n\n \"\"\"\n\n Handles the gui's interaction with the Kijiku model\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) : The KudasaiGUI object\\n\n\n Returns:\\n\n None\\n\n\n \"\"\"\n\n untranslated_text = self.text_entry.get(\"1.0\", tk.END) ## Get the text from the text entry box\n\n with open(self.gui_temp_kijiku_path, \"w+\", encoding=\"utf-8\") as file: ## Write the text to a temporary file for Kijiku to read and translate\n file.write(untranslated_text)\n\n if(self.kijiku_client is None): ## If the Kijiku object has not been created yet, create it (this is for the first time the user presses the translate button\n self.kijiku_client = Kijiku(self.config_dir,self.script_dir,from_gui=True) ## creates Kijiku object, passing the path to the config directory, the path to the script directory, and if it is being run from the GUI or not\n\n self.kijiku_client.translate(self.gui_temp_kijiku_path)\n\n with open(self.translated_text_path, \"r+\", encoding=\"utf-8\") as file:\n translated_text = file.read() ## Read the translated text\n\n with open(self.gui_temp_translation_log_path, \"r+\", encoding=\"utf-8\") as file: # Write the text to a temporary file\n kijiku_results = file.read() ## Read the results\n \n self.main_output_label.delete(\"1.0\", tk.END)\n self.main_output_label.insert(tk.END, translated_text)\n\n try: ## Try to display the 
results\n self.secondary_output_label.delete(\"1.0\", tk.END)\n self.secondary_output_label.insert(tk.END, kijiku_results)\n\n except: ## if window is destroyed, create a new one\n self.create_secondary_window()\n self.secondary_output_label.insert(tk.END, kijiku_results)\n\n self.secondary_window.deiconify() ## Show the results window\n self.secondary_window.mainloop()\n \n##-------------------start-of-handle_kaiseki()--------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n def handle_kaiseki(self) -> None:\n\n \"\"\"\n\n Handles the gui's interaction with the Kaiseki model\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) : The KudasaiGUI object\\n\n\n Returns:\\n\n None\\n\n\n \"\"\"\n\n untranslated_text = self.text_entry.get(\"1.0\", tk.END) ## Get the text from the text entry box\n\n with open(self.gui_temp_kaiseki_path, \"w+\", encoding=\"utf-8\") as file: ## Write the text to a temporary file for Kaiseki to read and translate\n file.write(untranslated_text)\n\n if(self.kaiseki_client is None): ## If the Kaiseki object has not been created yet, create it (this is for the first time the user presses the translate button\n self.kaiseki_client = Kaiseki(self.config_dir,self.script_dir,from_gui=True) ## creates Kaiseki object, passing the path to the config directory, the path to the script directory, and if it is being run from the GUI or not\n\n self.kaiseki_client.translate(self.gui_temp_kaiseki_path)\n\n with open(self.translated_text_path, \"r+\", encoding=\"utf-8\") as file:\n translated_text = file.read() ## Read the translated text\n\n with open(self.gui_temp_translation_log_path, \"r+\", encoding=\"utf-8\") as file: ## Write the text to a temporary file\n kaiseki_results = file.read()\n\n self.main_output_label.delete(\"1.0\", tk.END)\n self.main_output_label.insert(tk.END, translated_text)\n\n try: ## Try to display the results\n self.secondary_output_label.delete(\"1.0\", tk.END)\n self.secondary_output_label.insert(tk.END, kaiseki_results)\n\n except: ## if window is destroyed, create a new one\n self.create_secondary_window()\n self.secondary_output_label.insert(tk.END, kaiseki_results)\n\n self.secondary_window.deiconify() ## Show the results window\n self.secondary_window.mainloop()\n\n##-------------------start-of-on_main_window_close()-------------------------------------------------------------------------------------------------------------------------------------------------------\n\n def on_main_window_close(self) -> None:\n\n \"\"\"\n\n Handles what happens when the main window is closed\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) : The KudasaiGUI object\\n\n\n Returns:\\n\n None\\n\n\n \"\"\"\n\n try:\n self.update_alert_window.destroy()\n except:\n pass\n\n try:\n self.secondary_window.destroy()\n except:\n pass\n \n try:\n self.main_window.destroy()\n except:\n pass\n\n##-------------------start-of-run()------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n def run(self) -> None:\n\n \"\"\"\n\n Runs the GUI\\n\n\n Parameters:\\n\n self (object - KudasaiGUI) : The KudasaiGUI object\\n\n\n Returns:\\n\n None\\n\n\n \"\"\"\n\n 
self.main_window.mainloop()\n\n##-------------------start-of-main()-----------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\nif(__name__ == \"__main__\"):\n    gui = KudasaiGUI()\n    gui.run()\n","repo_name":"Seinuve/Kudasai","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":27426,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"17557020751","text":"from typing import Any, Dict, List\n\nimport pandas as pd\nimport numpy as np\nimport logging\nimport wandb\n\ndef clean_data(transactions: pd.DataFrame, params: Dict) -> pd.DataFrame:\n\n    log = logging.getLogger(__name__)\n\n    wandb.init(\n        project=params[\"wandb_project\"],\n        notes=\"implicit_pipeline\",\n        config=params,\n    )\n\n    filter_value = params.get(\"filter_value\", 2)\n    minimum_order_size = params.get(\"minimum_order_size\", 5)\n    maximum_order_size = params.get(\"maximum_order_size\", 20)\n\n    # Need to filter out products that didn't show up in more than some number of orders\n    product_group = transactions.loc[:, ['order_id', 'product_id']].groupby('product_id').count()\n\n    multi_product = product_group[product_group.order_id >= filter_value].count()\n    single_product = product_group[product_group.order_id < filter_value].count()\n\n    log.info(\"Products in at least {} orders: {}\".format(filter_value, multi_product['order_id']))\n    log.info(\"Products in less than {} orders: {}\".format(filter_value, single_product['order_id']))\n\n    product_filter = product_group[product_group.order_id >= filter_value].index.tolist()\n    product_filtered_df = transactions[transactions['product_id'].isin(product_filter)].copy()\n\n    # Need to filter out orders that didn't have at least a minimum number of products\n    order_group = product_filtered_df.loc[:, ['order_id', 'product_id']].groupby('order_id').count()\n\n    multi_order = order_group[(order_group.product_id >= minimum_order_size) & (order_group.product_id <= maximum_order_size)].count()\n    single_order = order_group[(order_group.product_id < minimum_order_size) | (order_group.product_id > maximum_order_size)].count()\n\n    log.info(\"Orders with at least {} products: {}\".format(minimum_order_size, multi_order['product_id']))\n    log.info(\"Orders with less than {} products: {}\".format(minimum_order_size, single_order['product_id']))\n\n    order_filter = order_group[(order_group.product_id >= minimum_order_size) & (order_group.product_id <= maximum_order_size)].index.tolist()\n    filtered_df = product_filtered_df[product_filtered_df['order_id'].isin(order_filter)].copy()\n\n    log.info(\"Original dataframe length: {}\".format(len(transactions)))\n    log.info(\"Filtered dataframe length: {}\".format(len(filtered_df)))\n\n    product_counts = filtered_df['product_id'].value_counts().to_numpy()\n    print('There are', len(product_counts), 'unique products\\n')\n\n    order_counts = filtered_df['order_id'].value_counts()\n    num_orders = len(order_counts)\n    num_items = len(product_counts)\n    sparsity = 1 - len(filtered_df) / (num_orders * num_items)  # use the filtered interaction count so sparsity reflects the matrix actually built\n    log.info(\"Number of orders: {}, number of items: {}\".format(num_orders, num_items))\n    print(f'matrix sparsity: {sparsity:f}')\n    log.info(\"Matrix sparsity: {}\".format(sparsity))\n\n    # Log to wandb\n    wandb.log({\"sparsity\": sparsity})\n\n    filtered_df['product_id'] = filtered_df['product_id'].astype(str)\n    filtered_df['order_id'] = filtered_df['order_id'].astype(str)\n\n    return 
filtered_df\n\n","repo_name":"HSV-AI/product-recommendation","sub_path":"src/productrec/pipelines/cleaning/nodes.py","file_name":"nodes.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"21032374226","text":"\"\"\"\nGrabs info from an RSS feed, and posts it in the server. TODO Clean\n\nthis cog is retired, and not loaded by the bot. If we want to add the features again, we can.\n\"\"\"\nfrom asyncio import sleep\n\nimport discord\nimport feedparser\nimport html2text\nfrom discord.ext import commands\n\n\nclass RSS(commands.Cog):\n \"\"\"\n The main class for the RSS handler.\n \"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.Cog.listener()\n async def rss_on_ready(self):\n \"\"\"\n This gets called to check if new stories are ready.\n \"\"\"\n\n def get_ids():\n \"\"\"Grabs the most recent article ID from each URL in\n the TOML, so that we can check what we have already sent\"\"\"\n most_recent_ids = []\n\n for news_source in self.bot.server_settings.rss_feed.keys():\n news_feed = feedparser.parse(self.bot.server_settings.rss_feed[news_source])\n\n id_code = news_feed[\"entries\"][0][\"id\"]\n most_recent_ids.append(id_code)\n\n return most_recent_ids\n\n def send_news(id_to_send):\n \"\"\"Uses the RSS ID's we grab from get_IDs that are NOT already in the DB.\n Sends out the news story if it is not in our DB.\n Each news story is parsed 'slightly' different, which is annoying.\"\"\"\n\n def send_python_software_foundation(id_tag):\n newsfeed = feedparser.parse(self.bot.server_settings.rss_feed[\"Python_Software_foundation\"])\n id_code = newsfeed[\"entries\"][0][\"id\"]\n if id_tag == id_code:\n # if the ID code in the article matches the ID we grabbed in get_IDs(), send.\n html_reader = html2text.HTML2Text()\n html_reader.ignore_links = False\n\n embed = discord.Embed(\n title=newsfeed[\"entries\"][0][\"title\"],\n description=f\"By: {newsfeed['entries'][0]['authors'][0]['name']}\" f\" at the Python Software Foundation!\",\n color=discord.Color.blue(),\n )\n\n embed.add_field(\n name=\"Preview: \",\n value=f\"{html_reader.handle(newsfeed['entries'][0]['summary'])[0:1015]}\" f\" ...\",\n inline=False,\n )\n\n embed.add_field(\n name=\"Read more: \",\n value=f\"[{newsfeed['entries'][0]['link']}]\" f\"({newsfeed['entries'][0]['link']})\",\n inline=False,\n )\n embed.set_thumbnail(url=\"https://www.python.org/static/img/python-logo@2x.png\")\n\n return embed\n return None\n\n def send_jetbrains(id_tag):\n jetbrains = self.bot.server_settings.rss_feed[\"jetbrains\"]\n newsfeed = feedparser.parse(jetbrains)\n id_code = newsfeed[\"entries\"][0][\"id\"]\n if id_tag == id_code:\n # if the ID code in the article matches the ID we grabbed in get_IDs(), send.\n\n html_reader = html2text.HTML2Text()\n html_reader.ignore_links = False\n\n # html = html_reader.handle(NewsFeed['entries'][0]['content'][0]['value'])\n\n embed = discord.Embed(\n title=f\"**{newsfeed['entries'][0]['title']}**\",\n description=f\"By: **{newsfeed['entries'][0]['author']}** at Jetbrains!\",\n color=discord.Color.blue(),\n )\n\n summary_text = f\"{newsfeed['entries'][0]['summary'][0:1015]} ...\"\n embed.add_field(name=\"**Preview: **\", value=summary_text, inline=False)\n embed.add_field(\n name=\"**Read more: **\",\n value=f\"[{newsfeed['entries'][0]['link']}]\" f\"({newsfeed['entries'][0]['link']})\",\n inline=False,\n )\n embed.set_thumbnail(url=newsfeed[\"entries\"][0][\"featuredimage\"])\n return 
embed\n            return None\n\n            embed = send_python_software_foundation(id_to_send)\n            embed2 = send_jetbrains(id_to_send)\n            if embed is not None:\n                return embed\n            return embed2\n\n        while True:\n            sent_id_list = []\n\n            all_ids = self.bot.db_client.get_all_stories()\n            for id_feed in all_ids:\n                sent_id_list.append(id_feed)\n\n            print(sent_id_list)\n            story_queue = get_ids()\n\n            for entry in story_queue:\n                if entry not in sent_id_list:\n                    news_channel = await self.bot.fetch_channel(self.bot.server_settings.normal_channel[\"news_channel\"])\n\n                    await news_channel.send(embed=send_news(entry))\n                    self.bot.db_client.add_story_to_table(entry)\n\n            wait_time_in_seconds = 86400  # 24 hours\n            await sleep(wait_time_in_seconds)\n\n\ndef setup(bot):\n    \"\"\"required\"\"\"\n    bot.add_cog(RSS(bot))\n","repo_name":"practical-python-org/ZorakBot","sub_path":"src/zorak/cogs/utility/_rss.py","file_name":"_rss.py","file_ext":"py","file_size_in_byte":5164,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"52"} +{"seq_id":"27946903469","text":"\"\"\"\n./controller/base_controller.py\n\"\"\"\n\nclass BaseController:\n    def __init__(self, model, view):\n        self.model = model\n        self.view = view\n        self.view.set_controller(self)\n\n    def get_bot_response(self, bot, chat_history, user):\n        agent = self.model.agents.get(bot.lower())\n        response = self.model.get_chat_response(agent, chat_history, user)\n        chat_history.append({\"role\": \"assistant\", \"content\": f\"{response}\"})\n\n        return response\n\n    def switch_to_base_mode(self):\n        self.model.mode = 0\n        self.view.withdraw()\n        from view.discord_gui import BaseGUI\n        self.view = BaseGUI()\n        self.view.create_main_frame()\n\n    def switch_to_chat_mode(self):\n        self.model.mode = 0\n        self.view.withdraw()\n        from view.discord_gui import DiscordGUI\n        from controller.chat_controller import ChatController\n        from model.chat_model import ChatModel\n        self.model = ChatModel()\n        self.view = DiscordGUI()\n        controller = ChatController(self.model, self.view)\n        self.view.set_controller(controller)\n        self.view.create_main_frame()\n\n    def switch_to_zoom_mode(self):\n        self.model.mode = 1\n        self.view.destroy()\n        from view.zoom_gui import ZoomGUI\n        self.view = ZoomGUI()\n        self.view.set_controller(self)\n        self.view.create_main_frame()\n","repo_name":"aamindehkordi/Emulated-Agents","sub_path":"controller/base_controller.py","file_name":"base_controller.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"7896601109","text":"'''\nAn N x N table is filled with numbers.\nCompute the sum of the values from (x1, y1) to (x2, y2).\n'''\n\nimport sys\ninput = sys.stdin.readline\n\nN, M = map(int, input().split())  # N: size of the table / M: number of sum queries\narr = [[0]*(N+1)]\ns_arr = [[0]*(N+1) for _ in range(N+1)]  # prefix-sum table\n\n# read the original table\nfor _ in range(N):\n    row = [0] + list(map(int, input().split()))\n    arr.append(row)\n\n# fill the prefix-sum table\nfor i in range(1, N+1):\n    for j in range(1, N+1):\n        s_arr[i][j] = s_arr[i][j-1] + s_arr[i-1][j] - s_arr[i-1][j-1] + arr[i][j]\n\n# print the answer for each query\nfor _ in range(M):\n    x1, y1, x2, y2 = map(int, input().split())\n    answer = s_arr[x2][y2] - s_arr[x1-1][y2] - s_arr[x2][y1-1] + s_arr[x1-1][y1-1]\n    print(answer)\n\n\n\n# for _ in range(N):\n#     ls = list(map(int, input().split()))\n#     target = [ls[0]]\n#     for i in range(1, N):\n#         target.append(target[i-1] + ls[i])\n#     s_arr.append(target)\n#\n# for _ in range(M):\n#     ans = 0\n#     x1, y1, x2, y2 = map(lambda x: int(x) - 1, input().split())  # x1 <= x2 , y1 <= 
y2\n# for r in range(x1, x2+1):\n# ans += s_arr[r][y2]\n# if y1 > 0:\n# ans -= s_arr[r][y1-1]\n# print(ans)","repo_name":"Going777/Algorithm","sub_path":"BOJ/Silver/11660_구간 합 구하기 5.py","file_name":"11660_구간 합 구하기 5.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12165335213","text":"import pygame\nimport copy\n\nfrom .Planet import Planet\nfrom .Universe import Universe\nfrom .View import View\nfrom .vecN import Vec3\n\n\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\nFPS = 60\nTIMESTEP = 0.01\n\n\nclass App:\n\n def __init__(self):\n self.view = View(SCREEN_WIDTH, SCREEN_HEIGHT)\n self.planets = []\n self.universe = Universe(TIMESTEP, 1)\n self.fps = FPS\n self.clock = pygame.time.Clock()\n self.runSimulation = False\n self.selectedPlanet = None\n self.initialUniverse = None\n\n def run(self):\n \"\"\"Run the application\"\"\"\n while self.view.running:\n self._playUniverseContruction()\n\n if self.runSimulation:\n self.pause = False\n self._playSimulation()\n\n if self.initialUniverse:\n self.universe = copy.copy(self.initialUniverse)\n\n def _playSimulation(self):\n \"\"\"Simulate the orbits and show them on the screen\"\"\"\n self.view.constructionMode = False\n self.initialUniverse = copy.deepcopy(self.universe)\n\n while self.view.running:\n self.clock.tick(self.fps)\n self.view.drawUniverse(self.universe)\n\n if not self.pause:\n self.universe.stepTime()\n\n action = self.view.handleEvents(self.universe.planets)\n\n if action:\n if action.type == 'PAUSE':\n self.pause = not self.pause\n if action.type == 'STOP':\n self.runSimulation = False\n return\n\n if action.type == 'FPS_UP':\n self.fps += 1\n if action.type == 'FPS_DOWN':\n self.fps -= 1\n\n def _playUniverseContruction(self):\n \"\"\"Show the universe construction screen\"\"\"\n current_color = 0\n color_list = [\n (250, 250, 250),\n (10, 10, 250),\n (255, 10, 10),\n (10, 250, 10),\n (250, 250, 10),\n (10, 250, 250),\n (250, 250, 10)\n ]\n\n self.view.constructionMode = True\n\n while self.view.running and not self.runSimulation:\n self.clock.tick(self.fps)\n self.view.drawUniverse(self.universe)\n\n action = self.view.handleEvents(self.universe.planets)\n\n if action:\n if action.type == 'ADD_PLANET':\n self.universe.addPlanet(Planet(\n 100,\n action.payload['pos'],\n action.payload['vel'],\n color_list[current_color]\n ))\n\n current_color += 1\n if current_color == len(color_list):\n current_color = 0\n\n self.selectedPlanet = None\n\n if action.type == 'SELECT_PLANET':\n self.selectedPlanet = action.payload\n\n if action.type == 'SET_VEL' and \\\n self.selectedPlanet is not None:\n planets = self.universe.planets\n drag = action.payload - planets[self.selectedPlanet].pos\n\n if abs(drag) > 1.2 * planets[self.selectedPlanet].radius:\n planets[self.selectedPlanet].vel = 0.667 * Vec3(drag)\n self.universe.setPlanets(planets)\n\n self.selectedPlanet = None\n\n if action.type == 'REMOVE_PLANET':\n self.universe.removePlanet(action.payload)\n\n if action.type == 'START_SIMULATION' or action.type == 'STOP' \\\n or action.type == 'PAUSE':\n self.runSimulation = True\n","repo_name":"JoaoAPS/PlanetSimulation","sub_path":"src/App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42420844080","text":"# use a machine learning api to train and predict the sentiment\nimport pandas as pd\ndf = 
pd.read_csv(\"python/data_analyst/amazon_cells_labelled.txt\", names =['review', 'sentiments'], sep ='\\t')\n\n# use pip3 install -U scikit-learn scipy matplotlib to install sklearn\nfrom sklearn.model_selection import train_test_split\nreviews = df['review'].values\nsentiments = df['sentiments'].values\nreviews_train, reviews_test, sentiment_train, sentiment_test = train_test_split(reviews, sentiments, test_size=0.2, random_state=500)\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nvectorizer = CountVectorizer()\nvectorizer.fit(reviews)\nX_train = vectorizer.transform(reviews_train)\nX_test = vectorizer.transform(reviews_test)\n\nfrom sklearn.linear_model import LogisticRegression\nclassifier = LogisticRegression()\nclassifier.fit(X_train, sentiment_train)\n\naccuracy = classifier.score(X_test, sentiment_test)\nprint('Accuracy: ', accuracy)","repo_name":"niurouyang/python","sub_path":"data_analyst/ML_Amazon.py","file_name":"ML_Amazon.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32212624773","text":"\"\"\"Habit Tracking using Pixela tracking the number of pages I read daily\"\"\"\nimport requests\nimport datetime\n\n# Constants\nUSERNAME = \"taylornovara\"\nTOKEN = \"kj235n24n3k2ln423lk4n\"\nID = \"reading-graph1\"\nDATE = datetime.datetime.now()\n\nuser_endpoint = \"https://pixe.la/v1/users\"\n\n# The required parameters to create an account on Pixela\nuser_params = {\n \"token\": TOKEN,\n \"username\": USERNAME,\n \"agreeTermsOfService\": \"yes\",\n \"notMinor\": \"yes\"\n}\n\n# Creates our profile on Pixela\n# response = requests.post(url=user_endpoint, json=user_params)\n# print(response.text)\n\ngraph_endpoint = f'{user_endpoint}/{USERNAME}/graphs'\n\ngraph_config = {\n \"id\": ID,\n \"name\": \"Reading Graph\",\n \"unit\": \"Pages\",\n \"type\": \"int\",\n \"color\": \"sora\"\n}\n\nheaders = {\n \"X-USER-TOKEN\": TOKEN\n}\n\n# Creates a graph\n# response = requests.post(url=graph_endpoint, json=graph_config, headers=headers)\n# print(response.text)\n\nvalue_endpoint = f\"{user_endpoint}/{USERNAME}/graphs/{ID}\"\n\nvalue_params = {\n \"date\": f\"{DATE.strftime('%Y%m%d')}\",\n \"quantity\": \"1\",\n}\n\n# Adds a value to the graph\n# response = requests.post(url=value_endpoint, json=value_params, headers=headers)\n# print(response.text)\n\nupdate_value_endpoint = f\"{user_endpoint}/{USERNAME}/graphs/{ID}/{DATE.strftime('%Y%m%d')}\"\n\n# Updates an existing value\n# response = requests.put(url=update_value_endpoint, json=value_params, headers=headers)\n# print(response.text)\n\ndelete_value_endpoint = f\"{user_endpoint}/{USERNAME}/graphs/{ID}/{DATE.strftime('%Y%m%d')}\"\n\n# Deletes an existing value\n# response = requests.delete(url=delete_value_endpoint, headers=headers)\n# print(response.text)\n","repo_name":"taylornovara/habit_tracker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25341654788","text":"from django.shortcuts import render\nfrom django.utils import timezone\nfrom .models import Post, Comment\nfrom django.shortcuts import render, get_object_or_404\nfrom .forms import PostForm, CommentForm\nfrom django.shortcuts import redirect\nfrom django.contrib.auth.decorators import login_required\n\nfrom google.auth import app_engine\nfrom google.cloud import bigquery\nfrom google.cloud.bigquery import 
Dataset\n\nimport httplib2\nfrom oauth2client.contrib import gce\n\ndef post_list(request):\n #posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')\n qs = Post.objects.all()\n qs = qs.filter(published_date__lte=timezone.now())\n qs = qs.order_by('published_date')\n\n q = request.GET.get('q', '') # GET request의 인자중에 q 값이 있으면 가져오고, 없으면 빈 문자열 넣기\n if q: # q가 있으면\n qs = qs.filter(title__icontains=q) # 제목에 q가 포함되어 있는 레코드만 필터링\n \n return render(request, 'blog/post_list.html', {'post_list':qs, 'q' : q,})\n #modelname_function.html\n\ndef post_detail(request, pk):\n #pk = \"100\"\n post = get_object_or_404(Post, pk=pk)\n #try:\n # post = Post.objects.get(pk=pk)\n #except Post.DoesNotExist:\n # raise Http404 # Page Not Found\n \n credentials = gce.AppAssertionCredentials(scope='https://www.googleapis.com/auth/devstorage.read_write')\n http = credentials.authorize(httplib2.Http())\n \n client = bigquery.Client()\n \n # Perform a query.\n QUERY = (\n 'SELECT spc_common, zipcode FROM `bigquery-public-data.new_york_trees.tree_census_2015` '\n 'WHERE health = \"Good\" '\n 'LIMIT 10')\n \n query_job = client.query(QUERY) # API request\n rows = query_job.result() # Waits for query to finish\n \n for row in rows:\n print(row.spc_common)\n\n return render(request, 'blog/post_detail.html', {'post':post})\n\n@login_required\ndef post_new(request):\n if request.method == \"POST\":\n form = PostForm(request.POST, request.FILES)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.save()\n return redirect('post_detail', pk=post.pk)\n else:\n form = PostForm()\n return render(request, 'blog/post_edit.html', {'form': form})\n\n@login_required\ndef post_edit(request, pk):\n \n post = get_object_or_404(Post, pk=pk)\n\n if request.method == \"POST\":\n form = PostForm(request.POST, request.FILES, instance=post)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.save()\n return redirect('post_detail', pk=post.pk)\n else:\n form = PostForm(instance=post)\n\n return render(request, 'blog/post_edit.html', {'form':form})\n\n@login_required\ndef post_draft_list(request):\n posts = Post.objects.filter(published_date__isnull=True).order_by('created_date')\n return render(request, 'blog/post_draft_list.html', {'posts': posts})\n\ndef post_publish(request, pk):\n post = get_object_or_404(Post, pk=pk)\n post.publish()\n return redirect('post_detail', pk=pk)\n\n@login_required\ndef post_remove(request, pk):\n post = get_object_or_404(Post, pk=pk)\n post.delete()\n return redirect('post_list')\n\ndef add_comment_to_post(request, pk):\n post = get_object_or_404(Post, pk=pk)\n if request.method == \"POST\":\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.post = post\n comment.save()\n return redirect('post_detail', pk=post.pk)\n else:\n form = CommentForm()\n return render(request, 'blog/add_comment_to_post.html', {'form': form})\n \n@login_required\ndef comment_approve(request, pk):\n comment = get_object_or_404(Comment, pk=pk)\n comment.approve()\n return redirect('post_detail', pk=comment.post.pk)\n\n@login_required\ndef comment_remove(request, pk):\n comment = get_object_or_404(Comment, pk=pk)\n comment.delete()\n return redirect('post_detail', 
pk=comment.post.pk)","repo_name":"soup7soup/django_project","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40860520248","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom time import time\nfrom sklearn import manifold\nimport pickle\n\nfeature_embeddings = np.load('feature_embedding.npy') \n\n'''images is cifar test set, shaped (10000,32,32,3)'''\n'''https://psu.box.com/s/urgcz6ikanw6mbaxin2vws4fvthw73jw'''\nimages = np.load('cifar.npy') \n\n# use tsne to cluster images in 2 dimensions\ntsne = manifold.TSNE()\nreduced = tsne.fit_transform(feature_embeddings)\nreduced_transformed = reduced - np.min(reduced, axis=0)\nreduced_transformed /= np.max(reduced_transformed, axis=0)\nimage_xindex_sorted = np.argsort(np.sum(reduced_transformed, axis=1))\n\nplot_number=10000\nimage_width=32\nmerged_width = int(np.ceil(np.sqrt(plot_number))*image_width)\nmerged_image = np.zeros((merged_width, merged_width,3))\n\nfor counter, index in enumerate(image_xindex_sorted):\n # set location\n a = np.ceil(reduced_transformed[counter, 0] * (merged_width-image_width-1)+1)\n b = np.ceil(reduced_transformed[counter, 1] * (merged_width-image_width-1)+1)\n a = int(a - np.mod(a-1,image_width) + 1)\n b = int(b - np.mod(b-1,image_width) + 1)\n\n img = images[counter]\n merged_image[a:a+image_width, b:b+image_width,:] = img\n \nplt.imshow(merged_image)\nplt.show()\nplt.imsave('tsne.jpg',merged_image)","repo_name":"CongWeilin/cluster-loss-tensorflow","sub_path":"evaluation/tsne.py","file_name":"tsne.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"52"} +{"seq_id":"33357341001","text":"import asyncio\nimport sys\nimport uuid\n\nimport pytest\nimport pytest_mock\n\nimport pybotters.store\n\n\ndef test_interface():\n store = pybotters.store.DataStoreManager()\n store.create(\"example\")\n assert isinstance(store._stores, dict)\n assert isinstance(store._events, list)\n assert isinstance(store._iscorofunc, bool)\n assert \"example\" in store\n assert isinstance(store[\"example\"], pybotters.store.DataStore)\n\n store = pybotters.store.DataStoreManager(auto_cast=True)\n assert store._auto_cast is True\n store.create(\"example\")\n store[\"example\"]._auto_cast is True\n\n\n@pytest.mark.asyncio\nasync def test_interface_onmessage(mocker: pytest_mock.MockerFixture):\n store = pybotters.store.DataStoreManager()\n assert not store._iscorofunc\n store._events.append(asyncio.Event())\n store.onmessage({\"foo\": \"bar\"}, mocker.MagicMock())\n assert not len(store._events)\n\n\ndef test_ds():\n ds1 = pybotters.store.DataStore()\n assert len(ds1._data) == 0\n assert len(ds1._index) == 0\n assert len(ds1._keys) == 0\n\n ds2 = pybotters.store.DataStore(keys=[\"foo\", \"bar\"])\n assert len(ds2._data) == 0\n assert len(ds2._index) == 0\n assert len(ds2._keys) == 2\n\n ds3 = pybotters.store.DataStore(\n data=[{\"foo\": \"value1\", \"bar\": \"value1\"}, {\"foo\": \"value2\", \"bar\": \"value2\"}]\n )\n assert len(ds3._data) == 2\n assert len(ds3._index) == 0\n assert len(ds3._keys) == 0\n\n ds4 = pybotters.store.DataStore(\n keys=[\"foo\", \"bar\"],\n data=[{\"foo\": \"value1\", \"bar\": \"value1\"}, {\"foo\": \"value2\", \"bar\": \"value2\"}],\n )\n assert len(ds4._data) == 2\n assert len(ds4._index) == 2\n assert len(ds4._keys) == 2\n\n class 
DataStoreWithKeys(pybotters.store.DataStore):\n _KEYS = [\"foo\", \"bar\"]\n\n ds5 = DataStoreWithKeys()\n assert len(ds5._data) == 0\n assert len(ds5._index) == 0\n assert len(ds5._keys) == 2\n\n\ndef test_hash():\n hashed = pybotters.store.DataStore._hash({\"foo\": \"bar\"})\n assert isinstance(hashed, int)\n\n\ndef test_cast_item():\n actual = {\n \"num_int\": 123,\n \"num_float\": 1.23,\n \"str_int\": \"123\",\n \"str_float\": \"1.23\",\n \"str_orig\": \"foo\",\n \"bool\": True,\n \"null\": None,\n }\n expected = {\n \"num_int\": 123,\n \"num_float\": 1.23,\n \"str_int\": 123,\n \"str_float\": 1.23,\n \"str_orig\": \"foo\",\n \"bool\": True,\n \"null\": None,\n }\n pybotters.store.DataStore._cast_item(actual)\n assert expected == actual\n\n\ndef test_sweep_with_key():\n data = [{\"foo\": f\"bar{i}\"} for i in range(1000)]\n ds = pybotters.store.DataStore(keys=[\"foo\"], data=data)\n ds._MAXLEN = len(data) - 100\n ds._sweep_with_key()\n assert len(ds._data) == 900\n assert len(ds._index) == 900\n\n\ndef test_sweep_without_key():\n data = [{\"foo\": f\"bar{i}\"} for i in range(1000)]\n ds = pybotters.store.DataStore(data=data)\n ds._MAXLEN = len(data) - 100\n ds._sweep_without_key()\n assert len(ds._data) == 900\n assert len(ds._index) == 0\n\n\ndef test_insert():\n data = [{\"foo\": f\"bar{i}\"} for i in range(1000)]\n\n ds1 = pybotters.store.DataStore(keys=[\"foo\"])\n ds1._insert(data)\n assert len(ds1._data) == 1000\n assert len(ds1._index) == 1000\n assert isinstance(next(iter(ds1._data.keys())), uuid.UUID)\n assert isinstance(next(iter(ds1._data.values())), dict)\n assert isinstance(next(iter(ds1._index.keys())), int)\n assert isinstance(next(iter(ds1._index.values())), uuid.UUID)\n\n ds2 = pybotters.store.DataStore()\n ds2._insert(data)\n assert len(ds2._data) == 1000\n assert len(ds2._index) == 0\n assert isinstance(next(iter(ds2._data.keys())), uuid.UUID)\n assert isinstance(next(iter(ds2._data.values())), dict)\n\n ds3 = pybotters.store.DataStore(keys=[\"invalid\"])\n ds3._insert(data)\n assert len(ds3._data) == 0\n assert len(ds3._index) == 0\n\n\ndef test_update():\n data = [{\"foo\": f\"bar{i}\"} for i in range(1000)]\n newdata = [{\"foo\": f\"bar{i}\"} for i in range(1000, 2000)]\n\n ds1 = pybotters.store.DataStore(keys=[\"foo\"], data=data)\n ds1._update(data)\n assert len(ds1._data) == 1000\n assert len(ds1._index) == 1000\n\n ds2 = pybotters.store.DataStore(keys=[\"foo\"], data=data)\n ds2._update(newdata)\n assert len(ds2._data) == 2000\n assert len(ds2._index) == 2000\n assert isinstance(list(ds2._data.keys())[-1], uuid.UUID)\n assert isinstance(list(ds2._data.values())[-1], dict)\n assert isinstance(list(ds2._index.keys())[-1], int)\n assert isinstance(list(ds2._index.values())[-1], uuid.UUID)\n\n ds3 = pybotters.store.DataStore()\n ds3._update(data)\n assert len(ds3._data) == 1000\n assert len(ds3._index) == 0\n assert isinstance(next(iter(ds3._data.keys())), uuid.UUID)\n assert isinstance(next(iter(ds3._data.values())), dict)\n\n ds4 = pybotters.store.DataStore(keys=[\"invalid\"])\n ds4._update(data)\n assert len(ds4._data) == 0\n assert len(ds4._index) == 0\n\n\ndef test_delete():\n data = [{\"foo\": f\"bar{i}\"} for i in range(1000)]\n nodata = [{\"foo\": f\"bar{i}\"} for i in range(1000, 2000)]\n invalid = [{\"invalid\": f\"data{i}\"} for i in range(1000, 2000)]\n\n ds1 = pybotters.store.DataStore(keys=[\"foo\"], data=data)\n ds1._delete(data)\n assert len(ds1._data) == 0\n assert len(ds1._index) == 0\n\n ds2 = pybotters.store.DataStore(keys=[\"foo\"], 
data=data)\n ds2._delete(nodata)\n assert len(ds2._data) == 1000\n assert len(ds2._index) == 1000\n\n ds3 = pybotters.store.DataStore(keys=[\"foo\"], data=data)\n ds3._delete(invalid)\n assert len(ds3._data) == 1000\n assert len(ds3._index) == 1000\n\n\ndef test_pop():\n data = [{\"foo\": f\"bar{i}\"} for i in range(1000)]\n\n ds1 = pybotters.store.DataStore(keys=[\"foo\"], data=data)\n assert ds1._pop({\"foo\": \"bar500\"}) == {\"foo\": \"bar500\"}\n assert ds1.get({\"foo\": \"bar500\"}) is None\n assert ds1._pop({\"foo\": \"bar9999\"}) is None\n\n ds2 = pybotters.store.DataStore(data=data)\n assert ds2._pop({\"foo\": \"bar500\"}) is None\n\n\ndef test_find_and_delete():\n data = [{\"foo\": f\"bar{i}\", \"mod\": i % 2} for i in range(1000)]\n query = {\"mod\": 1}\n invalid = {\"mod\": -1}\n\n ds1 = pybotters.store.DataStore(keys=[\"foo\"], data=data)\n ret1 = ds1._find_and_delete()\n # return value\n assert isinstance(ret1, list)\n assert len(ret1) == 1000\n # data store\n assert len(ds1._data) == 0\n assert len(ds1._index) == 0\n\n ds2 = pybotters.store.DataStore(keys=[\"foo\"], data=data)\n ret2 = ds2._find_and_delete(query)\n # return value\n assert isinstance(ret2, list)\n assert len(ret2) == 500\n assert all(map(lambda record: 1 == record[\"mod\"], ret2))\n # data store\n assert len(ds2._data) == 500\n assert all(map(lambda record: 0 == record[\"mod\"], ds2._data.values()))\n assert len(ds2._index) == 500\n\n ds3 = pybotters.store.DataStore(keys=[\"foo\"], data=data)\n ret3 = ds3._find_and_delete(invalid)\n # return value\n assert isinstance(ret3, list)\n assert len(ret3) == 0\n # data store\n assert len(ds3._data) == 1000\n assert len(ds3._index) == 1000\n\n\ndef test_clear():\n data = [{\"foo\": f\"bar{i}\"} for i in range(1000)]\n ds = pybotters.store.DataStore(keys=[\"foo\"], data=data)\n ds._clear()\n assert len(ds._data) == 0\n assert len(ds._index) == 0\n\n\ndef test_get():\n data = [{\"foo\": f\"bar{i}\"} for i in range(1000)]\n\n ds1 = pybotters.store.DataStore(keys=[\"foo\"], data=data)\n assert ds1.get({\"foo\": \"bar500\"}) == {\"foo\": \"bar500\"}\n assert ds1.get({\"foo\": \"bar9999\"}) is None\n\n ds2 = pybotters.store.DataStore(data=data)\n assert ds2.get({\"foo\": \"bar500\"}) is None\n\n\ndef test_find():\n data = [{\"foo\": f\"bar{i}\", \"mod\": i % 2} for i in range(1000)]\n query = {\"mod\": 1}\n invalid = {\"mod\": -1}\n ds = pybotters.store.DataStore(keys=[\"foo\"], data=data)\n assert isinstance(ds.find(), list)\n assert len(ds.find()) == 1000\n assert len(ds.find(query)) == 500\n assert len(ds.find(invalid)) == 0\n\n\ndef test__len__():\n data = [{\"foo\": f\"bar{i}\"} for i in range(1000)]\n ds = pybotters.store.DataStore(keys=[\"foo\"], data=data)\n assert len(ds) == 1000\n\n\ndef test__iter__():\n data = [{\"foo\": f\"bar{i}\"} for i in range(5)]\n ds = pybotters.store.DataStore(keys=[\"foo\"], data=data)\n data_iter = iter(ds)\n assert next(data_iter) == {\"foo\": \"bar0\"}\n assert next(data_iter) == {\"foo\": \"bar1\"}\n assert next(data_iter) == {\"foo\": \"bar2\"}\n assert next(data_iter) == {\"foo\": \"bar3\"}\n assert next(data_iter) == {\"foo\": \"bar4\"}\n with pytest.raises(StopIteration):\n next(data_iter)\n\n\ndef test__reversed__():\n data = [{\"foo\": f\"bar{i}\"} for i in range(5)]\n ds = pybotters.store.DataStore(keys=[\"foo\"], data=data)\n if sys.version_info.major == 3 and sys.version_info.minor >= 8:\n data_iter = reversed(ds)\n assert next(data_iter) == {\"foo\": \"bar4\"}\n assert next(data_iter) == {\"foo\": \"bar3\"}\n assert 
next(data_iter) == {\"foo\": \"bar2\"}\n        assert next(data_iter) == {\"foo\": \"bar1\"}\n        assert next(data_iter) == {\"foo\": \"bar0\"}\n        with pytest.raises(StopIteration):\n            next(data_iter)\n    else:\n        with pytest.raises(TypeError):\n            data_iter = reversed(ds)\n\n\ndef test_set():\n    ds = pybotters.store.DataStore()\n    event = asyncio.Event()\n    ds._events[event] = []\n    data = [{\"dummy1\": \"data1\"}, {\"dummy2\": \"data2\"}, {\"dummy3\": \"data3\"}]\n    ds._set(data)\n    assert all(e.is_set() for e in ds._events)\n    assert ds._events[event] == data\n\n\n@pytest.mark.asyncio\nasync def test_wait_set():\n    data = [{\"dummy\": \"data\"}]\n    ret = {}\n\n    class DataStoreHasDummySet(pybotters.store.DataStore):\n        async def _set(self, data) -> None:\n            return super()._set(data)\n\n    async def wait_func(ds):\n        ret[\"val\"] = await ds.wait()\n\n    ds0 = DataStoreHasDummySet()\n    t_wait0 = asyncio.create_task(wait_func(ds0))\n    t_set0 = asyncio.create_task(ds0._set(data))\n    await asyncio.wait_for(t_wait0, timeout=5.0)\n    assert t_set0.done()\n    assert data == ret[\"val\"]\n\n\n@pytest.mark.asyncio\nasync def test_wait_insert():\n    data = [{\"dummy\": \"data\"}]\n    ret = {}\n\n    class DataStoreHasDummyInsert(pybotters.store.DataStore):\n        async def _insert(self, data) -> None:\n            return super()._insert(data)\n\n    async def wait_func(ds):\n        ret[\"val\"] = await ds.wait()\n\n    ds1 = DataStoreHasDummyInsert()\n    t_wait1 = asyncio.create_task(wait_func(ds1))\n    t_set1 = asyncio.create_task(ds1._insert(data))\n    await asyncio.wait_for(t_wait1, timeout=5.0)\n    assert t_set1.done()\n    assert data == ret[\"val\"]\n\n\n@pytest.mark.asyncio\nasync def test_wait_update():\n    data = [{\"dummy\": \"data\"}]\n    ret = {}\n\n    class DataStoreHasDummyUpdate(pybotters.store.DataStore):\n        async def _update(self, data) -> None:\n            return super()._update(data)\n\n    async def wait_func(ds):\n        ret[\"val\"] = await ds.wait()\n\n    ds2 = DataStoreHasDummyUpdate()\n    t_wait2 = asyncio.create_task(wait_func(ds2))\n    t_set2 = asyncio.create_task(ds2._update(data))\n    await asyncio.wait_for(t_wait2, timeout=5.0)\n    assert t_set2.done()\n    assert data == ret[\"val\"]\n\n\n@pytest.mark.asyncio\nasync def test_wait_delete():\n    data = [{\"dummy\": \"data\"}]\n    ret = {}\n\n    class DataStoreHasDummyDelete(pybotters.store.DataStore):\n        async def _delete(self, data) -> None:\n            return super()._delete(data)\n\n    async def wait_func(ds):\n        ret[\"val\"] = await ds.wait()\n\n    ds3 = DataStoreHasDummyDelete()\n    t_wait3 = asyncio.create_task(wait_func(ds3))\n    t_set3 = asyncio.create_task(ds3._delete(data))\n    await asyncio.wait_for(t_wait3, timeout=5.0)\n    assert t_set3.done()\n    assert data == ret[\"val\"]\n","repo_name":"MtkN1/pybotters","sub_path":"tests/test_store.py","file_name":"test_store.py","file_ext":"py","file_size_in_byte":11707,"program_lang":"python","lang":"en","doc_type":"code","stars":323,"dataset":"github-code","pt":"52"}
+{"seq_id":"2473001235","text":"import hashlib\n\ndef get_substr_hash(st: str) -> list:\n    \"\"\" Computes the SHA-1 hash of every possible substring of the given string.\n    Input: a string.\n    Output: a list of substring hashes.\n    \"\"\"\n\n    len_str = len(st)\n\n    hash_lst = list()\n\n    for i in range(len(st) + 1):\n        for j in range(i + 1, len(st) + 1):\n            hash = hashlib.sha1(st[i:j].encode('utf-8')).hexdigest()\n            hash_lst.append(hash)\n\n    return hash_lst\n\n\nmy_str = 'abc'\nh_l = get_substr_hash(my_str)\n\nprint(f'\\nInput string: {my_str}')\nprint(f'\\nNumber of possible substrings: {len(h_l)}')\n\n# If the substring hashes themselves are not needed, the function could simply return the length\n# of the final list, which is the number of substrings in the given string.\n","repo_name":"dimkh/GeekBrains","sub_path":"course06/hw08/task01.py","file_name":"task01.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"21376365760","text":"\"\"\"\n    Database Models\n\"\"\"\nfrom django.conf import settings\nfrom django.core.mail import send_mass_mail\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.db.models import query\nfrom django.db.models.deletion import CASCADE, SET_NULL\nfrom django.urls import reverse\n\n\nclass BaseEntity(models.Model):\n    \"\"\"An abstract model which allows all other models to inherit its characteristics.\n    Gives every other model a field for the date it was created and the date it was updated.\"\"\"\n    created_on = models.DateTimeField(auto_now_add=True)\n    updated_on = models.DateTimeField(auto_now=True, blank=True, null=True)\n    user = models.ForeignKey(User, blank=True, null=True, on_delete=models.SET_NULL)\n\n    class Meta:\n        abstract = True\n\n\nclass Sector(BaseEntity):\n    \"\"\"Metadata sector model.\"\"\"\n    name = models.CharField(max_length=20)\n    description = models.TextField(blank=True, null=True)\n\n    def __str__(self):\n        return self.name\n\n\nclass Theme(BaseEntity):\n    \"\"\"Metadata theme model. Belongs to sectors.\"\"\"\n    sector = models.ForeignKey(Sector, models.PROTECT)\n    name = models.CharField(max_length=50)\n\n    def __str__(self):\n        return self.name\n\n\nclass Tag(BaseEntity):\n    \"\"\"A tag model for queries.\"\"\"\n    name = models.CharField(max_length=255)\n\n    def __str__(self):\n        return self.name\n\n\nclass Source(BaseEntity):\n    \"\"\"Metadata data source.\"\"\"\n    indicator = models.TextField()\n    indicator_acronym = models.CharField(max_length=10, blank=True, null=True)\n    source = models.TextField()\n    source_acronym = models.CharField(max_length=10, blank=True, null=True)\n    source_url = models.TextField(blank=True, null=True)\n    download_path = models.TextField(blank=True, null=True)\n    last_updated_on = models.DateTimeField(auto_now=True)\n    schema = models.TextField(blank=True, null=True)\n    storage_type = models.TextField(blank=True, null=True)\n    active_mirror_name = models.TextField(blank=True, null=True)\n    description = models.TextField(blank=True, null=True)\n    tags = models.ManyToManyField(Tag)\n\n    def __str__(self):\n        return \"{} from {}\".format(self.indicator_acronym, self.source_acronym)\n\n\nclass SourceColumnMap(BaseEntity):\n    \"\"\"Column mapping for datasets.\"\"\"\n    DATA_TYPE_CHOICES = (\n        (\"C\", \"Character\"),\n        (\"N\", \"Numeric\")\n    )\n    data_type = models.CharField(max_length=1, choices=DATA_TYPE_CHOICES, blank=True, null=True)\n    source = models.ForeignKey(Source, models.PROTECT, blank=True, null=True)\n    name = models.TextField()\n    description = models.TextField(blank=True, null=True)\n    alias = models.TextField(blank=True, null=True)\n    source_name = models.TextField()\n\n    def __str__(self):\n        return self.name\n\n    class Meta:\n        unique_together = (('source', 'name'),)\n\n\nclass Operation(BaseEntity):\n    \"\"\"\n    This is the base model on which a query is built. 
It stores all of the meta-data for a query\n \"\"\"\n ALIAS_STATUS_CHOICES = (\n ('d', 'Done'),\n ('p', 'Pending'),\n ('e', 'Error'),\n )\n\n name = models.TextField()\n description = models.TextField(blank=True, null=True)\n operation_query = models.TextField(blank=True, null=True)\n theme = models.ForeignKey(Theme, models.SET_NULL, blank=True, null=True)\n sample_output_path = models.TextField(blank=True, null=True)\n tags = models.ManyToManyField(Tag)\n is_draft = models.BooleanField(default=True)\n is_raw = models.BooleanField(default=False) # if true, operation query is taken as is without processing steps or advanced configs\n row_count = models.IntegerField(blank=True, null=True)\n # controls whether to count rows in the post_save signal\n count_rows = models.BooleanField(default=False)\n alias_creation_status = models.CharField(default='d', choices=ALIAS_STATUS_CHOICES, blank=True, max_length=1)\n \"\"\"\n The logs field can hold any information you deem undeserving of a field of its own\n e.g. error, warning or info messages\n Default structure:\n {\"type\": \"[error, warning, info]\", \"message\": \"\", \"\"}\n \"\"\"\n logs = models.JSONField(blank=True, null=True, default=dict)\n \"\"\"\n Holds the JSON options for what we called the Advanced Query Builder.\n Management code is under query_builder.advanced\n \"\"\"\n advanced_config = models.JSONField(blank=True, null=True, default=dict)\n last_accessed = models.DateTimeField(auto_now_add=True, null=True)\n renewal_sent = models.BooleanField(default=False)\n\n def __str__(self):\n return self.name\n\n def export_data(self):\n return reverse('export_stream', args=[self.pk])\n\n def get_operation_steps(self):\n return self.operationstep_set.order_by('step_id')\n\n def get_aliases(self):\n return self.operationdatacolumnalias_set.all()\n\n\nclass OperationStep(BaseEntity):\n \"\"\"These are the individual steps in a query.\"\"\"\n operation = models.ForeignKey(Operation, models.CASCADE)\n step_id = models.SmallIntegerField()\n name = models.CharField(max_length=200)\n description = models.TextField(blank=True, null=True)\n query_func = models.TextField(blank=True, null=True)\n query_kwargs = models.TextField(blank=True, null=True)\n source = models.ForeignKey(Source, models.SET_NULL, blank=True, null=True)\n # check operation for info on logs field\n logs = models.JSONField(blank=True, null=True, default=dict)\n\n def __str__(self):\n return self.name\n\n class Meta:\n unique_together = (('operation', 'step_id'),)\n\n\nclass OperationDataColumnAlias(models.Model):\n \"\"\"Alternative titles/names for data columns returned by an operation\"\"\"\n operation = models.ForeignKey(Operation, models.CASCADE)\n column_name = models.TextField()\n column_alias = models.TextField()\n\n def __str__(self):\n return '{} - {}'.format(self.column_name, self.column_alias)\n\n class Meta:\n unique_together = (('operation', 'column_name', 'column_alias'))\n\n\nclass Review(BaseEntity):\n \"\"\"A model to allow users to review other queries?\"\"\"\n operation = models.ForeignKey(Operation, models.DO_NOTHING, blank=True, null=True)\n rating = models.SmallIntegerField()\n comment = models.TextField(blank=True, null=True)\n\n def __str__(self):\n return \"Review of {} by {}\".format(self.operation, self.user)\n\n\nclass UpdateHistory(BaseEntity):\n \"\"\"Update history model for datasets.\"\"\"\n source = models.ForeignKey(Source, models.PROTECT)\n history_table = models.TextField(blank=True, null=True)\n released_on = 
models.DateTimeField(auto_now_add=True)\n release_description = models.TextField(blank=True, null=True)\n\n def __str__(self):\n return \"Update of {} on {}\".format(self.source, self.released_on)\n\n class Meta:\n verbose_name_plural = \"Update histories\"\n\n\nclass AuditLogEntry(models.Model):\n \"\"\"Consolidated audit log model. Should keep track of every change on every internal model.\"\"\"\n CREATE = 0\n UPDATE = 1\n DELETE = 2\n\n action_choices = (\n (CREATE, \"create\"),\n (UPDATE, \"update\"),\n (DELETE, \"delete\"),\n )\n\n timestamp = models.DateTimeField(auto_now_add=True, blank=True, null=True)\n user = models.ForeignKey(User, blank=True, null=True, on_delete=models.SET_NULL)\n action = models.PositiveSmallIntegerField(choices=action_choices, blank=True, null=True)\n object_id = models.BigIntegerField(blank=True, null=True)\n object_str = models.CharField(max_length=255)\n object_ctype = models.CharField(max_length=255)\n\n def __str__(self):\n if self.action == AuditLogEntry.CREATE:\n fstring = \"Created {}: {}\"\n elif self.action == AuditLogEntry.UPDATE:\n fstring = \"Updated {}: {}\"\n elif self.action == AuditLogEntry.DELETE:\n fstring = \"Deleted {}: {}\"\n else:\n fstring = \"Logged {}: {}\"\n\n return fstring.format(self.object_ctype, self.object_str)\n\n class Meta:\n verbose_name_plural = \"Audit log entries\"\n\n\nclass ScheduledEvent(BaseEntity):\n \"\"\"Scheduled Event Class.\"\"\"\n interval_type_choices = (\n ('min', 'Minutes'),\n ('sec', 'Seconds'),\n ('hrs', 'Hours'),\n ('dys', 'Days'),\n ('wks', 'Weeks'),\n ('mnt', 'Months'),\n ('yrs', 'Years'),\n )\n expected_runtime_type_choices = (\n ('min', 'Minutes'),\n ('sec', 'Seconds'),\n ('hrs', 'Hours'),\n )\n name = models.TextField(null=False, blank=False)\n description = models.TextField(null=True, blank=True)\n script_name = models.TextField(null=False, blank=False)\n enabled = models.BooleanField(default=True)\n start_date = models.DateTimeField(null=False, blank=False)\n repeat = models.BooleanField(default=False)\n interval = models.BigIntegerField(blank=True, null=True)\n interval_type = models.CharField(\n max_length=3, choices=interval_type_choices, null=True, blank=True)\n expected_runtime = models.BigIntegerField(blank=True, null=True)\n expected_runtime_type = models.CharField(\n max_length=3, choices=expected_runtime_type_choices, null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n def send_emails(self, subject, message, recipient_list):\n message_payload = (subject, message, settings.EMAIL_HOST_USER, recipient_list)\n send_mass_mail((message_payload, ), fail_silently=False)\n\n\nclass ScheduledEventRunInstance(BaseEntity):\n \"\"\"Scheduled Event Run Instances.\"\"\"\n\n status_choices = (\n ('p', 'Pending'),\n ('r', 'Running'),\n ('c', 'Completed'),\n ('e', 'Errored'),\n ('s', 'Skipped'),\n )\n scheduled_event = models.ForeignKey(ScheduledEvent, on_delete=models.CASCADE)\n start_at = models.DateTimeField(null=False, blank=False)\n ended_at = models.DateTimeField(null=True, blank=True)\n status = models.CharField(max_length=1, choices=status_choices, default='p')\n logs = models.TextField(blank=True, null=True)\n\n def __str__(self):\n return self.scheduled_event.name + ' - ' + self.status\n\n\nclass FrozenData(BaseEntity):\n \"\"\"Stores table names for \"frozen\" data\"\"\"\n status_choices = (\n ('p', 'Pending'),\n ('r', 'Running'),\n ('c', 'Completed'),\n ('e', 'Errored'),\n )\n\n parent_db_table = models.CharField(max_length=200, null=False)\n frozen_db_table = 
models.CharField(max_length=200, null=False)\n    status = models.CharField(max_length=1, choices=status_choices, default='p')\n    active = models.BooleanField(default=True)\n    description = models.CharField(max_length=200, null=False)\n    logs = models.TextField(blank=True, null=True)\n    last_accessed = models.DateTimeField(auto_now_add=True, null=True)\n    deletion_date = models.DateTimeField(null=True)\n    renewal_sent = models.BooleanField(default=False)\n\n    def __str__(self):\n        status = [choice[1] for choice in self.status_choices if choice[0] == self.status]\n        if self.frozen_db_table:\n            return self.frozen_db_table + ' - ' + status[0]\n        return self.parent_db_table + ' - ' + status[0]\n\n\nclass SavedQueryData(BaseEntity):\n    \"\"\"Borrows heavily from FrozenData to store query sets \"\"\"\n    status_choices = (\n        ('p', 'Pending'),\n        ('r', 'Running'),\n        ('c', 'Completed'),\n        ('e', 'Errored'),\n    )\n    saved_query_db_table = models.CharField(max_length=200, null=True)\n    active = models.BooleanField(default=True)\n    operation = models.ForeignKey(Operation, on_delete=models.CASCADE)\n    full_query = models.TextField(null=False)\n    status = models.CharField(max_length=1, choices=status_choices, default='p')\n    description = models.CharField(max_length=200, null=False)\n    logs = models.TextField(blank=True, null=True)\n    last_accessed = models.DateTimeField(auto_now_add=True, null=True)\n    renewal_sent = models.BooleanField(default=False)\n\n    def __str__(self):\n        status = [choice[1] for choice in self.status_choices if choice[0] == self.status]\n        if self.saved_query_db_table:\n            return self.saved_query_db_table + ' - ' + status[0]\n        return self.operation.name + ' - ' + status[0]\n\n\nclass ETLQuery(BaseEntity):\n    \"\"\"Holds queries that will be run after running the ETL processes to create a new frozen dataset\"\"\"\n\n    ETL_PROCESS_CHOICES = (\n        (\"IATI\", \"IATI Data\"),\n        (\"FTS\", \"FTS Data\"),\n        (\"Others\", \"Others\"),\n    )\n\n    class Meta:\n        verbose_name = 'ETL Query'\n        verbose_name_plural = 'ETL Queries'\n\n    query = models.ForeignKey(Operation, on_delete=CASCADE)\n    etl_process = models.CharField(max_length=20, choices=ETL_PROCESS_CHOICES, null=False)  # e.g. IATI, FTS\n    saved_dataset = models.OneToOneField(SavedQueryData, on_delete=SET_NULL, null=True, blank=True)\n    active = models.BooleanField(default=True)\n\n    def __str__(self) -> str:\n        return self.etl_process + '_' + str(self.query)\n","repo_name":"devinit/ddw-analyst-ui","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":12957,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"}
+{"seq_id":"33819531043","text":"#!/usr/local/bin/python3.6\n# coding:utf-8\n# @Time : 2019/8/1 17:09\n# @Author : ZhangBing\n# @Email : 18829272841@163.com\n# @File : gramcache.py\n# @Software: PyCharm\n# Code-cache related configuration\nimport time\nimport datetime\nimport inspect\nimport functools\n\n\n# Timing decorator: print how long the wrapped function takes to run\ndef times(fn):\n    @functools.wraps(fn)\n    def __wapper(*args, **kwargs):\n        start_time = datetime.datetime.now()\n        ret = fn(*args, **kwargs)\n        print(\"{} execution time: {}\".format(fn.__name__, (datetime.datetime.now() - start_time).total_seconds()))\n        return ret\n\n    return __wapper\n\n\n# Caching decorator: memoize results for tim seconds\ndef cache_local(tim):\n    def __cache_local(fn):\n        cache_dict = {}\n\n        @functools.wraps(fn)\n        def __wapper(*args, **kwargs):\n            buffer = []\n            for k, v in cache_dict.items():\n                _, dat = v\n                if (datetime.datetime.now().timestamp() - dat) >= tim:  # entries cannot be removed while iterating over the dict, so collect the expired keys first\n                    buffer.append(k)\n            for i in buffer:\n                cache_dict.pop(i)\n            param_keys = {}\n            param = inspect.signature(fn).parameters  # mapping of the function's formal parameters\n            lst_param = tuple(param.keys())  # ordered tuple of formal parameter names\n            for k, v in enumerate(args):\n                param_keys[lst_param[k]] = v\n            param_keys.update(kwargs)\n            for i in lst_param:\n                if i not in param_keys.keys():\n                    param_keys[i] = param[i].default\n            key = tuple(sorted(param_keys.items()))  # cache key: sorted tuple of (parameter, value) pairs\n            if key not in cache_dict.keys():\n                ret = fn(*args, **kwargs)\n                cache_dict[key] = (ret, datetime.datetime.now().timestamp())\n            return cache_dict[key][0]  # return only the cached result, not the (result, timestamp) pair\n\n        return __wapper\n\n    return __cache_local\n\n\nif __name__ == \"__main__\":\n    @times\n    @cache_local(5)\n    def add(x, y, z=10):\n        time.sleep(3)\n        return x + y + z\n\n\n    print(add(3, 4))\n    print('------------------')\n    print(add(3, 4))\n\n    print(add(3, 4, z=10))\n    print('*********************')\n    time.sleep(5)\n    print(add(3, 4, z=10))\n","repo_name":"changpaozhe/python","sub_path":"gram/gramcache.py","file_name":"gramcache.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"44100888496","text":"from pickle import load\r\nfrom os.path import splitext, exists\r\nimport networkx\r\nfrom sknetwork.ranking import Closeness\r\nfrom numpy import argmax\r\nfrom multiprocessing import Pool\r\nfrom itertools import islice\r\nfrom typing import Generator\r\n\r\ndef chunks(l: list, n: int) -> Generator:\r\n    \"\"\"Divide a list of nodes `l` in `n` chunks\"\"\"\r\n    l_c = iter(l)\r\n    while 1:\r\n        x = tuple(islice(l_c, n))\r\n        if not x:\r\n            return\r\n        yield x\r\n\r\ndef betweenness_centrality_parallel(G, processes=None):\r\n    \"\"\"Parallel betweenness centrality function\"\"\"\r\n    p = Pool(processes=processes)\r\n    node_divisor = len(p._pool) * 4\r\n    node_chunks = list(chunks(G.nodes(), G.order() // node_divisor))\r\n    num_chunks = len(node_chunks)\r\n    bt_sc = p.starmap(\r\n        networkx.betweenness_centrality_subset,\r\n        zip(\r\n            [G] * num_chunks,\r\n            node_chunks,\r\n            [list(G)] * num_chunks,\r\n            [True] * num_chunks,\r\n            [None] * num_chunks,\r\n        ),\r\n    )\r\n\r\n    # Reduce the partial solutions\r\n    bt_c = bt_sc[0]\r\n    for bt in bt_sc[1:]:\r\n        for n in bt:\r\n            bt_c[n] += bt[n]\r\n    return bt_c\r\n\r\ndef eigenvector(G):\r\n    eigenvector = networkx.eigenvector_centrality(G)\r\n    max_eigenvector = max(eigenvector, key=eigenvector.get)\r\n    print(f'Most central node by eigenvector centrality: {max_eigenvector}')\r\n    \r\ndef closeness(G):\r\n    sparse = networkx.to_scipy_sparse_matrix(G)\r\n    keys = tuple(G.nodes())  # node order must match the sparse matrix built from G (leaves were removed above)\r\n    closeness = Closeness(method = 'approximate', n_jobs = 8)\r\n    max_closeness = argmax(closeness.fit_transform(sparse))\r\n    print(f'Most central node by closeness: {keys[max_closeness]}')\r\n    \r\ndef betweenness(G):\r\n    betweenness = betweenness_centrality_parallel(G, 4)\r\n    max_betweenness = max(betweenness, key=betweenness.get)\r\n    print(f'Most central node by betweenness: {max_betweenness}')\r\n\r\nif __name__ == '__main__':\r\n    filename = input('Enter the name of the file with the graph edges: ')\r\n    with open(filename, 'rb') as file:\r\n        graph = load(file)\r\n\r\n    G = networkx.Graph()\r\n    for start, nodes in graph.items():\r\n        for end in nodes:\r\n            G.add_edge(start, end)\r\n\r\n    leaves = [node for node, degree in G.degree() if degree < 2]\r\n    G.remove_nodes_from(leaves)\r\n    print(f'Removed {len(leaves)} leaves')\r\n    print(f'The graph has {G.number_of_nodes()} nodes and {G.number_of_edges()} edges')\r\n\r\n    eigenvector(G)\r\n    closeness(G)\r\n    
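# betweenness runs last: the parallel helper above forks a worker pool and is the most expensive step\r\n    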
betweenness(G)\r\n","repo_name":"yinozemcev/ISIT","sub_path":"graph/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"42498213324","text":"from __future__ import absolute_import, print_function\n\nimport tweepy\n\nfrom tweepy import OAuthHandler\n\n# from auth import consumer_key,consumer_secret,access_token,access_token_secret\n\nfrom timelineThread import timelineThread\n\nimport json\n\nimport couchdb\n\nfrom dblogin import user, password\n\nimport sys\nconsumer_key=\"x4YMdEwhO5T7gzp8wyoPOSRzA\"\nconsumer_secret=\"yLDAKyghTdUDxlK0x6NXeuyIBXPBpj6uHd0nvLnL4UJmcZLZGf\"\naccess_token=\"1253888897722925056-yCYIwGQtf2TZ6I8i6dnEfYGEsIzUgZ\"\naccess_token_secret=\"IU32uJX5nCZ3pE0vbqkl7nDGvjYO7vAXgfnJMmBiGtAgV\"\n\n\n\nclass dbTwitterSearch():\n    def __init__ (self,api,db=None):\n        self.api=api\n        self.db=db\n        self.count=0\n        self.max_tweets=100\n\n    def twitter_search(self,country):\n        places=self.api.geo_search(query=str(country),granularity=\"country\")\n        if len(places)==1:\n            place_id=places[0].id\n        else:\n            print(\"error: expected exactly one place, got {0}\".format(places))\n            return\n        tweet_batch_num=1\n        while True:\n            searched_tweets = self.api.search(q=\"place:{0}\".format(place_id), count=self.max_tweets)\n            start_index=0\n            print(\"{0}: Num of tweets: {1}\".format(tweet_batch_num,len(searched_tweets)))\n            for i in searched_tweets:\n                tweet=i._json\n                if tweet:\n                    print(tweet)\n                    if \"id\" in tweet and \"text\" in tweet and \"id_str\" in tweet:\n                        self.count += 1\n                        user = tweet[\"user\"][\"screen_name\"]\n                        \"\"\"\n                        timeline search\n                        \"\"\"\n                        # t = timelineThread(self.count,self.api,user,self.db)\n                        # t.start()\n                        try:\n                            print(\"%s: %s\\n\" % (tweet[\"user\"][\"screen_name\"], tweet[\"full_text\"]))\n                        except Exception:\n                            print(\"%s: %s\\n\" % (tweet[\"user\"][\"screen_name\"], tweet[\"text\"]))\n                    else:\n                        print(\"Received a response that is not a tweet\\n\")\n                        print(tweet)\n                if self.count>=10:\n                    print(\"finish\\n\")\n                    sys.exit(0)\n            tweet_batch_num += 1\n\nif __name__ == \"__main__\":\n\n    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n    auth.set_access_token(access_token, access_token_secret)\n    api = tweepy.API(auth)\n    searcher=dbTwitterSearch(api)\n    searcher.twitter_search(\"Australia\")\n","repo_name":"Jeremypan/Project_Cloud_Computing_and_Cluster","sub_path":"src/haverster/searchTimeline.py","file_name":"searchTimeline.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"38656151045","text":"import os\nimport sys\nimport numpy as np\nimport torch.utils.data as data_utils\nimport cv2\nimport random\nimport pickle\nsys.path.insert(0, './data')\nsys.path.append('../../')\nsys.path.append('../')\nimport utils\nimport json\nimport torch\n\n\ndef list_to_dict(l):\n    d = {}\n    for entry in l:\n        d[entry] = 1\n    return d\n\ndef gibson_get_continuous_action(pos, ori):\n    actions = []\n    for i in range(len(pos)-1):\n        pos_diff = (pos[i+1]-pos[i])[:2]\n        rot = np.array([[np.cos(ori[i][2]), -np.sin(ori[i][2])],\n                        [np.sin(ori[i][2]), np.cos(ori[i][2])]])\n        pos_diff = np.dot(np.transpose(rot), np.array(pos_diff))\n\n        yaw = ori[i + 1][2] - ori[i][2]\n\n        ## handle transitions from pi to -pi and 0 to -0 (orientation values range from (-pi, pi))\n        if ori[i][2] > 0 and ori[i + 1][2] < 0:\n            if abs(ori[i][2]) > 1.57 and abs(ori[i + 1][2]) > 1.57:\n                yaw = (3.141593 - ori[i][2]) + (3.141593 + ori[i + 1][2])\n            elif abs(ori[i][2]) < 
1.57 and abs(ori[i + 1][2]) < 1.57:\n yaw = -(ori[i][2] + abs(ori[i + 1][2]))\n else:\n print('wrong orientation values?')\n exit(-1)\n elif ori[i][2] < 0 and ori[i + 1][2] > 0:\n if abs(ori[i][2]) > 1.57 and abs(ori[i + 1][2]) > 1.57:\n yaw = -((3.141593 + ori[i][2]) + (3.141593 - ori[i + 1][2]))\n elif abs(ori[i][2]) < 1.57 and abs(ori[i + 1][2]) < 1.57:\n yaw = abs(ori[i][2]) + ori[i + 1][2]\n else:\n print('wrong orientation values? 2')\n exit(-1)\n\n # manual scaling so that they are in more reasonable range\n actions.append([pos_diff[0] * 10, pos_diff[1] * 10, yaw* 5])\n actions = np.array(actions, dtype=np.float32)\n return actions\n\n\ndef get_custom_dataset(opts=None, set_type=0, force_noshuffle=False, getLoader=True, num_workers=1):\n\n def collate_fn(batch):\n batch = list(filter(lambda x: x is not None, batch))\n return torch.utils.data.dataloader.default_collate(batch)\n\n dataset = []\n\n shuffle = True if set_type == 0 else False\n shuffle = True if opts.play else shuffle\n\n if force_noshuffle:\n shuffle = False\n\n for tmp in opts.data.split('-'):\n curdata, datadir = tmp.split(':')\n dataset.append(generic_dataset(opts, set_type=set_type, name=curdata, datadir=datadir))\n\n if getLoader:\n dloader = []\n for dset in dataset:\n dloader.append(data_utils.DataLoader(dset, batch_size=opts.bs,\n num_workers=num_workers, pin_memory=True, shuffle=shuffle, drop_last=True, collate_fn=collate_fn))\n if len(dataset) == 1 and not opts.test:\n return dloader[0]\n return dloader\n else:\n return dataset\n\n\n\n\nclass generic_dataset(data_utils.Dataset):\n\n def __init__(self, opts, start=0, end=0, set_type=0, name='', datadir=''):\n self.opts = opts\n self.set_type = set_type\n self.samples = []\n self.name = name\n self.layout_memory = utils.check_arg(self.opts, 'layout_memory')\n self.continuous_action = utils.check_arg(self.opts, 'continuous_action')\n self.predict_logvar = utils.check_arg(self.opts, 'predict_logvar')\n self.learn_interpolation = utils.check_arg(self.opts, 'learn_interpolation')\n self.no_duplicate = utils.check_arg(self.opts, 'no_duplicate')\n train = True if set_type == 0 else False\n if 'gibson' in opts.data or 'carla' in opts.data:\n if 'gibson' in opts.data:\n try:\n train_keys, val_keys, tst_keys = pickle.load(open('gibson_data_split.pkl', 'rb'))\n except:\n train_keys, val_keys, tst_keys = pickle.load(open('../gibson_data_split.pkl', 'rb'))\n else:\n try:\n train_keys, val_keys, tst_keys = pickle.load(open('carla_data_split.pkl', 'rb'))\n except:\n train_keys, val_keys, tst_keys = pickle.load(open('../carla_data_split.pkl', 'rb'))\n\n train_keys = list_to_dict(train_keys)\n val_keys = list_to_dict(val_keys)\n tst_keys = list_to_dict(tst_keys)\n\n paths = []\n root_dirs = datadir\n for datadir in root_dirs.split(','):\n for fname in os.listdir(datadir):\n cur_file = os.path.join(datadir, fname)\n if not '.npy' in fname:\n continue\n\n key = fname.split('.')[0]\n key = key.replace('_', '/')\n do = False\n if (train and key in train_keys) or (not train and key in val_keys) or (opts.test and key in tst_keys):\n do = True\n if not do:\n continue\n paths.append([key, cur_file])\n\n elif 'pilotnet' in opts.data:\n if '8hz' in opts.data:\n self.pilotnet_actions = pickle.load(open('8hz_all_actions.pkl', 'rb'))\n train_keys, val_keys, tst_keys = pickle.load(open('pilotnet8hz_paths_and_count.p', 'rb'))\n else:\n # 16hz\n self.pilotnet_actions = pickle.load(open('16hz_all_actions.pkl', 'rb'))\n train_keys, val_keys, tst_keys = 
pickle.load(open('pilotnet16hz_paths_and_count.p', 'rb'))\n\n paths = []\n root_dirs = datadir\n nn = 0\n random.seed(4)\n for datadir in root_dirs.split(','):\n\n fnames = os.listdir(datadir)\n for fname in fnames:\n key_dict = None\n cur_file = os.path.join(datadir, fname)\n\n key = fname.split('.')[0]\n do = False\n is_train = False\n if (train and (key in train_keys)):\n do = True\n key_dict = train_keys\n\n if key in train_keys:\n is_train = True\n\n if (not train and key in val_keys):\n do = True\n key_dict = val_keys\n if is_train:\n print(key)\n nn+= 1\n if (opts.test and key in tst_keys):\n do = True\n key_dict = tst_keys\n\n if key_dict is None and key in train_keys:\n key_dict = train_keys\n\n if not do:\n continue\n\n\n pid = key.split('_')[0]\n if not pid in self.pilotnet_actions:\n print(pid + ' not in pilotnet_actions file')\n continue\n if self.no_duplicate:\n obj_count = 1\n else:\n obj_count = key_dict[key]['obj_count']\n for _ in range(obj_count):\n paths.append([key, cur_file])\n\n random.Random(4).shuffle(paths)\n if utils.check_arg(self.opts, 'num_chunk') and self.opts.num_chunk > 0:\n num_chunk = self.opts.num_chunk\n cur_ind = self.opts.cur_ind\n chunk_size = len(paths) // num_chunk\n if cur_ind == num_chunk-1:\n paths = paths[cur_ind*chunk_size:]\n else:\n paths = paths[cur_ind*chunk_size:(cur_ind+1)*chunk_size]\n\n tmp = np.load(paths[0][1], allow_pickle=True).item()\n\n opts.spatial_d = tmp['spatial_mu'].shape[1]\n opts.spatial_h = tmp['spatial_mu'].shape[2]\n opts.spatial_w = tmp['spatial_mu'].shape[3]\n opts.theme_d = tmp['theme_mu'].shape[1]\n opts.separate_holistic_style_dim = opts.theme_d\n opts.spatial_dim = opts.spatial_h\n\n opts.spatial_total_dim = opts.spatial_h * opts.spatial_w * opts.spatial_d\n\n\n self.samples = paths\n print('\\n\\n----numData: ' + str(len(paths))+ '\\n\\n')\n\n\n\n def parse_action(self, data, cur_a):\n if 'action_space' in data:\n num_actions = data['action_sapce']\n elif 'gibson' in self.name:\n num_actions = 9\n if self.continuous_action:\n action = [0] * self.opts.action_space\n for i in range(len(cur_a)):\n action[i] = cur_a[i]\n return np.asarray(action).astype('float32'), -1\n else:\n cur_a = gibson_get_action(cur_a)\n\n elif 'pilotnet' in self.name:\n if self.continuous_action:\n action = [0] * self.opts.action_space\n for i in range(len(cur_a)):\n action[i] = cur_a[i]\n return np.asarray(action).astype('float32'), -1\n else:\n print('continouse action pilotnet not supported')\n exit(-1)\n elif 'carla' in self.name:\n if self.continuous_action:\n action = [0] * self.opts.action_space\n for i in range(len(cur_a)):\n action[i] = cur_a[i]\n\n return np.asarray(action).astype('float32'), -1\n else:\n cur_a = carla_get_action(cur_a[0])\n num_actions = 13\n else:\n num_actions = 10\n action = [0] * self.opts.action_space\n action[cur_a] = 1\n a_t = np.asarray(action).astype('float32')\n return a_t, num_actions\n\n def load_gibson(self, data):\n if self.continuous_action:\n actions = gibson_get_continuous_action(data['np_pos'], data['np_ori'])\n data['np_action'] = actions\n data['np_img_state'] = data['np_img_state'][:len(actions)]\n if 'np_img_logvar' in data:\n data['np_img_logvar'] = data['np_img_logvar'][:len(actions)]\n\n return data\n\n def load_carla(self, fn):\n data = np.load(fn, allow_pickle=True).item()\n if self.continuous_action:\n\n # normalize mean 0 std 1\n actions = []\n for ind in range(len(data['data'])):\n speed = (data['data'][ind]['speed'] - 18.2) / 3.62\n # + right - left\n yaw = 
(data['data'][ind]['angular_velocity'][2] - (-0.40)) / 20.45\n actions.append(np.array([yaw, speed]))\n data['np_action'] = np.array(actions).astype('float32')\n\n return data\n\n def __len__(self):\n return len(self.samples)\n\n\n def __getitem__(self, idx):\n fn = self.samples[idx]\n key = fn[0]\n try:\n if 'carla' in self.opts.data:\n data = self.load_carla(fn[1])\n else:\n data = np.load(fn[1], allow_pickle=True).item()\n except:\n print('dataloader error: ')\n print(fn)\n return None\n\n len_episode = len(data['spatial_mu'])\n\n if 'gibson' in self.opts.data:\n data = self.load_gibson(data)\n elif 'pilotnet' in self.opts.data:\n if key.startswith('ind'):\n pid = key.split('#')[1]\n starting_index = key.split('#')[-1].split('.')[0]\n elif key.startswith('pn-meta'):\n pid = key.split('_')[0]\n starting_index = key.split('_')[-1].split('.')[0]\n starting_index = int(starting_index)\n action = self.pilotnet_actions[pid][starting_index:starting_index+len_episode+1]\n data['np_action'] = action\n\n states, actions, neg_actions, rand_actions, img_key = [], [], [], [], 'np_img_state'\n\n data[img_key] = np.concatenate([data['spatial_mu'].reshape(data['spatial_mu'].shape[0], self.opts.spatial_total_dim),\n data['theme_mu']], axis=1)\n\n\n ep_len = len_episode - self.opts.num_steps\n if self.opts.test:\n start_pt = 0 ## start from the first screen for testing\n if 'carla' in self.opts.data and self.learn_interpolation:\n start_pt = 20\n else:\n start_pt = random.randint(0, ep_len)\n\n i = 0\n while i < self.opts.num_steps:\n if start_pt + i >= len(data[img_key]):\n cur_s = data[img_key][len(data[img_key]) - 1]\n cur_a = data['np_action'][len(data[img_key]) - 1]\n else:\n cur_s = data[img_key][start_pt + i]\n cur_a = data['np_action'][start_pt + i]\n\n s_t = cur_s\n a_t, num_actions = self.parse_action(data, cur_a)\n\n # sample negative action within the episode\n rand_ind = random.randint(start_pt, start_pt+self.opts.num_steps - 1)\n while rand_ind == start_pt + i:\n rand_ind = random.randint(start_pt, start_pt+self.opts.num_steps - 1)\n false_a_t, _ = self.parse_action(data, data['np_action'][rand_ind])\n\n # save\n states.append(s_t)\n actions.append(a_t)\n neg_actions.append(false_a_t)\n i = i + 1\n\n del data\n return states, actions, neg_actions\n","repo_name":"nv-tlabs/DriveGAN_code","sub_path":"data/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":13217,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"4"} +{"seq_id":"14043579004","text":"#!/usr/bin/env python\n#encoding: utf8\n\nimport rospy\nfrom pimouse_keyboard_run.msg import String\n\ndef callback(data):\n rospy.loginfo(rospy.get_caller_id()+\"I heard %s\",data.data)\n\ndef listener():\n rospy.init_node('test_sub', anonymous=True)\n\n rospy.Subscriber(\"chatter\", String, callback)\n rospy.spin()\n\nif __name__ == '__main__':\n listener()\n","repo_name":"Yuto2511/pimouse_keyboard_run","sub_path":"scripts/test_sub.py","file_name":"test_sub.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"15868566399","text":"from uuid import UUID\nfrom typing import List, Optional\nfrom ninja import Field\nfrom ninja import Schema\nfrom ninja import ModelSchema\nfrom arkid.core.models import User, UserGroup\nfrom arkid.core.schema import ResponseSchema\nfrom arkid.core.translation import gettext_default as _\nfrom api.v1.pages.tenant_manage.child_manager import 
select_user_page, select_permission_page, select_scope_page\n\nclass ChildManagerListOut(ModelSchema):\n\n class Config:\n model = User\n model_fields = [\"id\",\"username\"]\n\n groups: str = Field(title=_('Group', '所属分组'))\n\n @staticmethod\n def resolve_groups(obj):\n usergroups = UserGroup.valid_objects.filter(\n users=obj\n )\n usergroup_str = ''\n for index, usergroup in enumerate(usergroups):\n usergroup_str = usergroup_str + usergroup.name\n if index < (len(usergroups)-1):\n usergroup_str = usergroup_str + ','\n return usergroup_str\n\n\nclass ChildManagerCreateInItem(Schema):\n id:UUID = Field(hidden=True)\n name:str\n\nclass ChildManagerUserCreateInItem(Schema):\n id:UUID = Field(hidden=True)\n username:str\n\nclass ChildManagerDeatilOut(Schema):\n\n permissions: Optional[List[ChildManagerCreateInItem]] = Field(\n # field=\"id\",\n page=select_permission_page.tag,\n # link=\"name\",\n # default=None,\n title=_(\"拥有权限\")\n )\n\n manager_scope: Optional[List[ChildManagerCreateInItem]] = Field(\n # field=\"id\",\n page=select_scope_page.tag,\n # link=\"name\",\n # default=None,\n title=_(\"管理范围\")\n )\n\nclass ChildManagerDeatilBaseOut(ResponseSchema):\n\n data: ChildManagerDeatilOut\n\nclass ChildManagerCreateSchemaIn(Schema):\n\n users: Optional[List[ChildManagerUserCreateInItem]] = Field(\n # field=\"id\",\n page=select_user_page.tag,\n # link=\"name\",\n # default=None,\n title=_(\"选择用户\")\n )\n\n permissions: Optional[List[ChildManagerCreateInItem]] = Field(\n # field=\"id\",\n page=select_permission_page.tag,\n # link=\"name\",\n # default=None,\n title=_(\"拥有权限\")\n )\n\n manager_scope: Optional[List[ChildManagerCreateInItem]] = Field(\n # field=\"id\",\n page=select_scope_page.tag,\n # link=\"name\",\n # default=None,\n title=_(\"管理范围\")\n )\n\nclass ChildManagerEditSchemaIn(Schema):\n\n users: Optional[List[ChildManagerUserCreateInItem]] = Field(\n # field=\"id\",\n page=select_user_page.tag,\n # link=\"name\",\n # default=None,\n title=_(\"选择用户\")\n )\n\n permissions: Optional[List[ChildManagerCreateInItem]] = Field(\n # field=\"id\",\n page=select_permission_page.tag,\n # link=\"name\",\n # default=None,\n title=_(\"拥有权限\")\n )\n\n manager_scope: Optional[List[ChildManagerCreateInItem]] = Field(\n # field=\"id\",\n page=select_scope_page.tag,\n # link=\"name\",\n # default=None,\n title=_(\"管理范围\")\n )","repo_name":"longguikeji/arkid","sub_path":"api/v1/schema/child_manager.py","file_name":"child_manager.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","stars":1454,"dataset":"github-code","pt":"4"} +{"seq_id":"18263497759","text":"class NavigatorWebElementDefinitions:\n\n configuration_tab = \\\n {\n 'XPATH': '//*[@data-automation-tag=\"top_configuration\"]',\n 'wait_for': 2\n }\n\n tools_tab = \\\n {\n 'XPATH': '//*[@href=\"#/tools\"]',\n 'wait_for': 2\n }\n\n auditing_tab = \\\n {\n 'XPATH': '//*[@data-automation-tag=\"top_auditing\"]',\n 'wait_for': 5\n }\n\n clients_tab = \\\n {\n 'XPATH': '//*[@data-automation-tag=\"top_nodes\"]',\n 'wait_for': 5\n }\n\n system_config_tab = \\\n {\n 'XPATH': '//*[@id=\"pf-sidebar-links\"]/ul/a[6]/span',\n 'wait_for': 2\n }\n\n policies_tab = \\\n {\n 'XPATH': '//span[contains(text(), \"Policies and Access Control\")]',\n 'wait_for': 2\n }\n\n system_configuration_menu = \\\n {\n 'XPATH': '//*[@href=\"#/configuration/system_configuration\"]'\n }\n\n menu_popup_icon = \\\n {\n 'XPATH': '//*[@id=\"__BVID__33__BV_toggle_\"]'\n }\n\n management_menu_popup = \\\n {\n 'XPATH': 
'//*[@href=\"#/licenses\"]'\n }\n","repo_name":"yangjing6688/framework","sub_path":"extauto/a3/defs/NavigatorWebElementDefinitions.py","file_name":"NavigatorWebElementDefinitions.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"25532889110","text":"# ============================= Import modules =======================================\r\nimport time\r\nfrom http_requests import *\r\nfrom instrument import *\r\n\r\n# ========================== Experiment parameters ===============================\r\n# the 14 variables of the database for the control of the oscilloscope\r\nreset = int(0)\r\nautoscale = int(0)\r\nsaveScreen = int(0)\r\nsaveWaveform = int(0)\r\nvertScale = float(1) # V per vertical division\r\nvertOffset = float(0) # in V\r\nhoriScale = float(0.001) # seconds per horizontal division\r\nhoriOffset = float(0) # in seconds\r\ntrigEdgelevel = float(0) # in V\r\naquMode = int(1) # 1:NORMal, 2:PEAK, 3:AVERage, 4:HRESolution\r\nsubmit = int(0)\r\nwaveform = \"\"\r\nerror = int\r\nscreen_img = \"\"\r\n\r\npostUrl = \"https://idee3d.xyz/remolab/oscilloscope/posttodb.php\" # url adress of the php file to update the data of the database\r\ngetUrl = \"https://idee3d.xyz/remolab/oscilloscope/dbtoget.php\" # url adress of the php file to get data from the database\r\ninstrAdress = \"USB0::0x0957::0x1799::MY57230744::INSTR\" # VISA adress of the instrument\r\n\r\n# ========================== Experiment functions =================================\r\ndef getParse(getStr) : # data out of the database is encoded like param1;param2;param3;...;param_n; ,here n=14\r\n index = [] # creation of an array index\r\n curr = 0 # current position in the get string\r\n nbSeparators = 13 # number of separators ; with the dbtoget.php file\r\n for i in range(nbSeparators) : # there we parcour all the get string and we save the indexes where we find a \";\" character\r\n curr = getStr.index(\";\",curr+1)\r\n index.append(curr)\r\n # then we can slice the get string in differents part with the indexes founds previously and then update the variables\r\n reset = int(getStr[0:index[0]])\r\n autoscale = int(getStr[index[0]+1:index[1]])\r\n saveScreen = int(getStr[index[1]+1:index[2]])\r\n saveWaveform = int(getStr[index[2]+1:index[3]])\r\n vertScale = float(getStr[index[3]+1:index[4]])\r\n vertOffset = float(getStr[index[4]+1:index[5]])\r\n horiScale = float(getStr[index[5]+1:index[6]])\r\n horiOffset = float(getStr[index[6]+1:index[7]])\r\n trigEdgelevel = float(getStr[index[7]+1:index[8]])\r\n aquMode = int(getStr[index[8]+1:index[9]])\r\n submit = int(getStr[index[9]+1:index[10]])\r\n waveform = getStr[index[10]+1:index[11]]\r\n error = int(getStr[index[11]+1:index[12]])\r\n screen_img = getStr[index[12]+1:]\r\n return reset,autoscale,saveScreen,saveWaveform,vertScale,vertOffset,horiScale,horiOffset,trigEdgelevel,aquMode,submit,waveform,error,screen_img\r\n\r\ndef errorHandler(error) : # error callback function that is called by the class oscilloscope when there is an error with the command sent to the oscilloscope\r\n if error : # if there is an error we pass the error variable of the database to 1, and all the others \"event variables\" to 0, and we reset the oscilloscope\r\n payload = {\"reset\": 0, \"autoscale\": 0, \"savewaveform\": 0, \"saveconfig\": 0, \"submit\": 0, \"error\": 1}\r\n httpPost(postUrl, payload) # sending the POST request with the data\r\n time.sleep(4) # waiting a bit\r\n osc.reset() # 
reseting the oscilloscope\r\n time.sleep(2) # waiting a bit\r\n else : # if there is no error with the actual command sent but there was an error with the previous\r\n payload = {\"error\": 0} # we pass the error variable of the database back to 0\r\n httpPost(postUrl, payload) # sending the POST request with the data\r\n\r\n\r\n# ============================= MAIN PROGRAM ==========================================\r\nosc = oscilloscope(instrAdress, errorHandler)\r\n\r\n# setting all the \"event variables\" to 0 and the waveform to vide (variables in the database)\r\npayload = {\"reset\": 0, \"autoscale\": 0, \"savewaveform\": 0, \"savescreen\": 0, \"submit\": 0, \"waveform\": \"vide\", \"error\": 0, \"screen_img\": \"vide\"}\r\nhttpPost(postUrl, payload)\r\n\r\nwhile True : # main loop\r\n # at each turn of the main loop, we update the program variables with the variables of the database : db vars => prgrm vars\r\n reset,autoscale,saveScreen,saveWaveform,vertScale,vertOffset,horiScale,horiOffset,trigEdgelevel,aquMode,submit,waveform,error,screen_img = getParse(httpGet(getUrl))\r\n \r\n if (reset == 1):\r\n osc.reset() # reseting the oscilloscope\r\n payload = {\"reset\": 0}\r\n httpPost(postUrl, payload) # setting back the reset event variable to 0\r\n\r\n if (autoscale == 1):\r\n osc.autoscale() # autoscale\r\n payload = {\"autoscale\": 0}\r\n httpPost(postUrl, payload) # setting back the autoscale event variable to 0\r\n\r\n if (submit == 1):\r\n osc.setVertScale(1, vertScale) # setting the differents parameters for the oscilloscope (channel 1)\r\n osc.setVertOffset(1, vertOffset)\r\n osc.setHoriScale(horiScale/1000)\r\n osc.setHoriOffset(horiOffset/1000)\r\n osc.setTriggerEdgeLevel(1, trigEdgelevel)\r\n osc.setAquisitionMode(aquMode)\r\n payload = {\"submit\": 0}\r\n httpPost(postUrl, payload) # setting back the submit event variable to 0\r\n\r\n if (saveWaveform == 1):\r\n waveform = osc.saveWaveform(1, 300) # saving the waveform with 300 points (channel 1)\r\n waveform_str = \"\" # creating a variable to convert the waveform array into a string with csv format\r\n for i in range(len(waveform[0])) : # fomating the waveform into a csv format\r\n waveform_str += str(waveform[0][i])+','+str(waveform[1][i])+'\\n'\r\n payload = {\"waveform\": waveform_str, \"savewaveform\": 0}\r\n httpPost(postUrl, payload) # setting back the savewaveform event variable to 0, and sending the waveform string\r\n \r\n if (saveScreen == 1):\r\n screen = osc.saveScreen() # saving the screen of the oscilloscope into a base64 string\r\n payload = {\"savescreen\": 0, \"screen_img\": screen}\r\n httpPost(postUrl, payload) # setting back the savescreen event variable to 0, and sending the screen image string\r\n\r\n \r\n time.sleep(3) # delay between each turn of the main loop\r\n\r\n\r\n\r\n\r\n\r\n#UPDATE `oscillo` SET `reset`=0,`autoscale`=0,`submit`=0 WHERE `id`=1\r\n \r\n#osc = oscilloscope()\r\n#osc.autoscale()\r\n#print(osc.saveWaveform(1, 200))\r\n \r\n\r\n\r\n \r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n \r\n \r\n","repo_name":"Twistix/remo-lab","sub_path":"final_program/oscillo/prgrm_oscillo.py","file_name":"prgrm_oscillo.py","file_ext":"py","file_size_in_byte":6312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"3305709633","text":"#!/usr/bin/env python\n\"\"\"\nRetrieve list of blocked items\n\"\"\"\n\nimport pyslurm\n\n\ndef display(block_dict):\n \"\"\"Format output\"\"\"\n if block_dict:\n\n date_fields = []\n\n 
print(f\"{'':*^80}\")\n\n for key, value in block_dict.items():\n\n print(f\"{key} :\")\n for part_key in sorted(value.items()):\n\n if part_key in date_fields:\n ddate = value[part_key]\n if ddate == 0:\n print(f\"\\t{part_key:<17} : N/A\")\n elif (\"reason_uid\" in part_key) and (value[\"reason\"] is None):\n print(f\"\\t{part_key:<17} :\")\n else:\n ddate = pyslurm.epoch2date(ddate)\n print(f\"\\t{part_key:<17} : {ddate}\")\n elif part_key == \"connection_type\":\n print(\n f\"\\t{part_key:<17} : {pyslurm.get_connection_type(value[part_key])}\"\n )\n elif part_key == \"state\":\n print(f\"\\t{part_key:<17} : {value[part_key]}\")\n else:\n print(f\"\\t{part_key:<17} : {value[part_key]}\")\n\n print(f\"{'':*^80}\")\n\n\nif __name__ == \"__main__\":\n\n a = pyslurm.block()\n try:\n a.load()\n new_block_dict = a.get()\n except ValueError as value_error:\n print(f\"Block query failed - {value_error.args[0]}\")\n else:\n if new_block_dict:\n display(new_block_dict)\n print(f\"\\nBlock IDs - {a.ids()}\\n\")\n else:\n print(\"No Blocks found !\")\n","repo_name":"PySlurm/pyslurm","sub_path":"examples/blocks_list.py","file_name":"blocks_list.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":414,"dataset":"github-code","pt":"4"} +{"seq_id":"8881719107","text":"import numpy as np\nimport math\n\ndef _compute_apk(targets, predictions, k):\n\n if len(predictions) > k:\n predictions = predictions[:k]\n\n score = 0.0\n num_hits = 0.0\n\n for i, p in enumerate(predictions):\n if p in targets and p not in predictions[:i]:\n num_hits += 1.0\n score += num_hits / (i + 1.0)\n\n if not list(targets):\n return 0.0\n\n return score / min(len(targets), k)\n\ndef dcg_at_k(r, k, method=1):\n r = np.asfarray(r)[:k]\n if r.size:\n if method == 0:\n return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))\n elif method == 1:\n return np.sum(r / np.log2(np.arange(2, r.size + 2)))\n else:\n raise ValueError('method must be 0 or 1.')\n return 0.\n\ndef ndcg_at_k(r, k, method=1):\n dcg_max = dcg_at_k(sorted(r, reverse=True), k, method)\n if not dcg_max:\n return 0.\n return dcg_at_k(r, k, method) / dcg_max\n\ndef cc_at_k(cc, k, CATE_NUM):\n cates = set()\n for i in range(k):\n if i > (len(cc)-1):\n break\n for c in cc[i]:\n cates.add(c)\n return len(cates) / CATE_NUM\n\ndef _compute_precision_recall(targets, predictions, k, iidcate_map, cate_num):\n\n pred = predictions[:k]\n r = []\n cc = []\n for i in pred:\n if i in targets:\n r.append(1)\n else:\n r.append(0)\n if i == 0:\n continue\n else:\n cc.append(iidcate_map[i-1])\n\n num_hit = len(set(pred).intersection(set(targets)))\n precision = float(num_hit) / len(pred)\n recall = float(num_hit) / len(targets)\n ndcg = ndcg_at_k(r, k) \n cc = cc_at_k(cc, k, cate_num)\n return precision, recall, ndcg, cc\n\ndef evaluate_ranking(model, test, config, l_kernel, cate, train=None, k=10):\n \"\"\"\n Compute Precision@k, Recall@k scores and average precision (AP).\n One score is given for every user with interactions in the test\n set, representing the AP, Precision@k and Recall@k of all their\n test items.\n\n Parameters\n ----------\n\n model: fitted instance of a recommender model\n The model to evaluate.\n test: :class:`spotlight.interactions.Interactions`\n Test interactions.\n train: :class:`spotlight.interactions.Interactions`, optional\n Train interactions. 
If supplied, rated items in\n interactions will be excluded.\n k: int or array of int,\n The maximum number of predicted items\n \"\"\"\n\n test = test.tocsr()\n\n if train is not None:\n train = train.tocsr()\n\n if not isinstance(k, list):\n ks = [k]\n else:\n ks = k\n\n precisions = [list() for _ in range(len(ks))]\n recalls = [list() for _ in range(len(ks))]\n ndcgs = [list() for _ in range(len(ks))]\n ccs = [list() for _ in range(len(ks))]\n apks = list()\n\n for user_id, row in enumerate(test):\n\n if not len(row.indices):\n continue\n \n predictions = -model.predict(user_id)\n if train is not None:\n rated = set(train[user_id].indices)\n else:\n rated = []\n \n predictions = predictions.argsort()\n predictions = [p for p in predictions if p not in rated]\n \n targets = row.indices \n if 0 in targets:\n print('there is 0')\n\n for i, _k in enumerate(ks):\n precision, recall, ndcg, cc = _compute_precision_recall(targets, predictions, _k, cate, config.cate_num)\n precisions[i].append(precision)\n recalls[i].append(recall)\n ndcgs[i].append(ndcg)\n ccs[i].append(cc)\n\n apks.append(_compute_apk(targets, predictions, k=np.inf))\n\n precisions = [np.array(i) for i in precisions]\n recalls = [np.array(i) for i in recalls]\n\n if not isinstance(k, list):\n precisions = precisions[0]\n recalls = recalls[0]\n\n return precisions, recalls, ndcgs, ccs\n","repo_name":"l-lyl/DPPLikelihoods4SeqRec","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":3935,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"4"} +{"seq_id":"39487562791","text":"__author__ = 'warreee'\n\n\nclass Node(object):\n \"\"\"\n RED = true\n BLACK = false\n \"\"\"\n def __init__(self, key, val, color = False, N = 0):\n self.key = key\n self.val = val\n self.left = None\n self.right = None\n self.N = N # number of nodes in this subtree\n self.color = color\n self.maxhi = val.hi\n\nclass RedBlackBST(object):\n\n root = None\n\n def maxhi(self):\n if self.left is not None:\n a = self.left.segment.hi\n if self.right is not None:\n b = self.right.segment.hi\n c = self.key.segment.hi\n return max(a, b, c)\n\n def isRed(self, node):\n if node is None:\n return False\n return node.color\n\n def rotateRight(self, node):\n x = node.left\n node.left = x.right\n x.right = node\n x.color = x.right.color\n x.right.color = True\n x.N = node.N\n node.N = self.size(node.left) + self.size(node.right) + 1\n\n node.maxhi = max(node.maxhi, node.left.maxhi, node.right.maxhi)\n x.maxhi = max(x.maxhi, x.left.maxhi, x.right.maxhi)\n return x\n\n def rotateLeft(self, node):\n x = node.right\n node.right = x.left\n x.left = node\n x.color = x.left.color\n x.left.color = True\n x.N = node.N\n self.maxhi()\n\n node.maxhi = max(node.maxhi, node.left.maxhi, node.right.maxhi)\n x.maxhi = max(x.maxhi, x.left.maxhi, x.right.maxhi)\n\n return x\n\n def flipColors(self, node):\n node.color = not node.color\n node.left.color = not node.left.color\n node.right.color = not node.right.color\n\n def put(self, key, val):\n self.root = self._put(self.root, key, val)\n\n self.root.color = False\n\n def _put(self, node, key, val):\n\n if node is None:\n return Node(key, val, 1)\n\n cmp = key.compare(node.key)\n if cmp == 0:\n node.left = self._put(node.left, key, val)\n elif cmp == 1:\n node.right = self._put(node.right, key, val)\n else:\n node.val = val\n\n if self.isRed(node.right) and not self.isRed(node.left):\n node = self.rotateLeft(node)\n if self.isRed(node.left) and 
self.isRed(node.left.left):\n node = self.rotateRight(node)\n if self.isRed(node.left) and self.isRed(node.right):\n self.flipColors(node)\n\n node.N = self.size(node.left) + self.size(node.right) + 1\n return node\n\n def delete(self, key):\n if not self.isRed(self.root.left) and not self.isRed(self.root.right):\n self.root.color = True\n\n self.root = self._delete(self.root, key)\n\n if not self.isEmpty():\n self.root.color = False\n\n def _delete(self, node, key):\n if key.compare(node.key) == 0:\n if not self.isRed(node.left) and not self.isRed(node.left.left):\n node = self.moveRedLeft(node)\n node.left = self._delete(node.left, key)\n\n else:\n if self.isRed(node.left):\n node = self.rotateRight(node)\n if key.compare(node.key) == 2 and node.right is None:\n return None\n if not self.isRed(node.right) and not self.isRed(node.right.left):\n node = self.moveRedRight(node)\n if key.compare(node.key) == 2:\n x = self.min(node.right)\n node.key = x.key\n node.val = x.val\n node.right = self.deleteMin(node.right)\n else:\n node.right = self._delete(node.right, key)\n return self.balance(node)\n\n def min(self, node):\n if node.left is None:\n return node\n else:\n return self.min(node.left)\n\n\n def deleteMin(self, node):\n if node.left is None:\n return None\n\n if not self.isRed(node.left) and not self.isRed(node.left.left):\n node = self.moveRedLeft(node)\n\n node.left = self.deleteMin(node.left)\n return self.balance(node)\n\n def fixUpMax(self, node):\n while node is not self.root:\n node.maxhi = max(node.maxhi, node.left.maxhi, node.right.maxhi)\n\n\n def isEmpty(self):\n return self.root is None\n\n def size(self, node):\n if node is None:\n return 0\n return node.N\n def max(self, node):\n if node is None:\n return 0\n else:\n return node.maxhi\n\n def moveRedLeft(self, node):\n self.flipColors(node)\n if self.isRed(node.right.left):\n node.right = self.rotateRight(node.right)\n node = self.rotateLeft(node)\n return node\n\n def moveRedRight(self, node):\n self.flipColors(node)\n if self.isRed(node.left.left):\n node = self.rotateRight(node)\n return node\n\n def balance(self, node):\n if self.isRed(node.right):\n node = self.rotateLeft(node)\n if self.isRed(node.left) and self.isRed(node.left.left):\n node = self.rotateRight(node)\n if self.isRed(node.left) and self.isRed(node.right):\n self.flipColors(node)\n node.N = self.size(node.left) + self.size(node.right) + 1\n return node\n\n def intervalSearch(self, segment, newroot):\n\n overlaplist = list()\n if segment.overlaps(newroot.val):\n overlaplist.append(segment)\n if newroot.left is not None:\n self.intervalSearch(segment, newroot.left.val)\n if newroot.left is not None:\n self.intervalSearch(segment, newroot.right.val)\n\n elif newroot.left.val is None and newroot.right is not None:\n self.intervalSearch(segment, newroot.right.val)\n\n else:\n self.intervalSearch(segment, newroot.left.val)","repo_name":"SofieT/tmi","sub_path":"RedBlackBST.py","file_name":"RedBlackBST.py","file_ext":"py","file_size_in_byte":5827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"6276152897","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 13 14:30:46 2019\nLast edited on 06/ Nov/ 2019\n\nauthor: Wei-Chung\n\ndescription: this is the denoise function \"denoise_tv_chambolle\" in skimage.\nIt only supports numpy array, this function transfer it and it support torch.tensor.\n\"\"\"\n\nimport torch\n\n#%%\ndef diff(image, axis):\n '''\n Take the 
difference of different dimension(1~4) of images\n '''\n ndim = image.ndim\n if ndim == 3: \n if axis == 0:\n return image[1:,:,:] - image[:-1,:,:]\n elif axis == 1:\n return image[:,1:,:] - image[:,:-1,:]\n elif axis == 2:\n return image[:,:,1:] - image[:,:,:-1]\n \n elif ndim == 2: \n if axis == 0:\n return image[1:,:] - image[:-1,:]\n elif axis == 1:\n return image[:,1:] - image[:,:-1]\n elif ndim == 4: \n if axis == 0:\n return image[1:,:,:,:] - image[:-1,:,:,:]\n elif axis == 1:\n return image[:,1:,:,:] - image[:,:-1,:,:]\n elif axis == 2:\n return image[:,:,1:,:] - image[:,:,:-1,:]\n elif axis == 3:\n return image[:,:,:,1:] - image[:,:,:,:-1]\n elif ndim == 1: \n if axis == 0:\n return image[1:] - image[:-1]\n\n \ndef _denoise_tv_chambolle_nd_torch(image, weight=0.1, eps=2.e-4, n_iter_max=200):\n \"\"\"\n image : torch.tensor\n n-D input data to be denoised.\n weight : float, optional\n Denoising weight. The greater `weight`, the more denoising (at\n the expense of fidelity to `input`).\n eps : float, optional\n Relative difference of the value of the cost function that determines\n the stop criterion. The algorithm stops when:\n (E_(n-1) - E_n) < eps * E_0\n n_iter_max : int, optional\n Maximal number of iterations used for the optimization.\n Returns\n -------\n out : torch.tensor\n Denoised array of floats.\n \n \"\"\" \n \n \n ndim = image.ndim\n pt = torch.zeros((image.ndim, ) + image.shape, dtype=image.dtype)\n gt = torch.zeros_like(pt)\n dt = torch.zeros_like(image)\n i = 0\n while i < n_iter_max:\n if i > 0:\n # dt will be the (negative) divergence of p\n dt = -pt.sum(0)\n slices_dt = [slice(None), ] * ndim\n slices_pt = [slice(None), ] * (ndim + 1)\n for ax in range(ndim):\n slices_dt[ax] = slice(1, None)\n slices_pt[ax+1] = slice(0, -1)\n slices_pt[0] = ax\n dt[tuple(slices_dt)] += pt[tuple(slices_pt)]\n slices_dt[ax] = slice(None)\n slices_pt[ax+1] = slice(None)\n out = image + dt\n else:\n out = image\n Et = torch.mul(dt,dt).sum()\n \n # gt stores the gradients of out along each axis\n # e.g. gt[0] is the first order finite difference along axis 0\n slices_gt = [slice(None), ] * (ndim + 1)\n for ax in range(ndim):\n slices_gt[ax+1] = slice(0, -1)\n slices_gt[0] = ax\n gt[tuple(slices_gt)] = diff(out, ax)\n slices_gt[ax+1] = slice(None)\n \n norm = torch.sqrt((gt ** 2).sum(axis=0)).unsqueeze(0)\n Et = Et + weight * norm.sum()\n tau = 1. / (2.*ndim)\n norm = norm * tau / weight\n norm = norm + 1.\n pt = pt - tau * gt\n pt = pt / norm\n Et = Et / float(image.view(-1).shape[0])\n if i == 0:\n E_init = Et\n E_previous = Et\n else:\n if torch.abs(E_previous - Et) < eps * E_init:\n break\n else:\n E_previous = Et\n i += 1\n \n return out\n\n\ndef denoise_tv_chambolle_torch(image, weight=0.1, eps=2.e-4, n_iter_max=200,\n multichannel=False):\n \n \"\"\"Perform total-variation denoising on n-dimensional images.\n Parameters\n ----------\n image : torch.tensor of ints, uints or floats\n Input data to be denoised. `image` can be of any numeric type,\n but it is cast into an torch.tensor of floats for the computation\n of the denoised image.\n weight : float, optional\n Denoising weight. The greater `weight`, the more denoising (at\n the expense of fidelity to `input`).\n eps : float, optional\n Relative difference of the value of the cost function that\n determines the stop criterion. 
The algorithm stops when:\n (E_(n-1) - E_n) < eps * E_0\n n_iter_max : int, optional\n Maximal number of iterations used for the optimization.\n multichannel : bool, optional\n Apply total-variation denoising separately for each channel. This\n option should be true for color images, otherwise the denoising is\n also applied in the channels dimension.\n Returns\n -------\n out : torch.tensor\n Denoised image.\n \n \"\"\"\n im_type = (image.numpy()).dtype\n if not im_type.kind == 'f':\n image = image.type(torch.float64)\n image = image/torch.abs(image.max()+image.min())\n \n if multichannel:\n out = torch.zeros_like(image)\n for c in range(image.shape[-1]):\n out[...,c] = _denoise_tv_chambolle_nd_torch(image[..., c], weight, eps, n_iter_max)\n else:\n out = _denoise_tv_chambolle_nd_torch(image, weight, eps, n_iter_max)\n \n return out\n\n","repo_name":"shakes76/PatternFlow","sub_path":"algorithms/denoise/45033027_denoise_tv_chambolle/torch_denoise_tv_chambolle.py","file_name":"torch_denoise_tv_chambolle.py","file_ext":"py","file_size_in_byte":5379,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"4"} +{"seq_id":"13188831499","text":"import pyglet\nfrom pyglet.window import key\n\nclass Controller(pyglet.window.Window):\n def __init__(self, width, height, title=\"Pyglet window\"):\n super().__init__(width, height, title)\n self.x = 0\n self.total_time = 0.0\n \ncontroller = Controller(width=1280, height=800)\nlabel = pyglet.text.Label('Hola CG 2023', font_name='Times New Roman', font_size=36, \n x=controller.x, y=controller.height//2, anchor_x='left', anchor_y='center')\nimage = pyglet.resource.image('assets/boo.png')\n\n@controller.event\ndef on_draw():\n controller.clear()\n label.draw()\n image.blit(controller.x, 100)\n\n@controller.event\ndef on_key_press(symbol, modifiers):\n if symbol == key.RIGHT:\n controller.x = controller.x + 10\n label.x = controller.x \n elif symbol == key.LEFT:\n controller.x = controller.x - 10\n label.x = controller.x\n elif symbol == key.ESCAPE:\n controller.close()\n\ndef update(dt, controller):\n controller.x += 1\n label.x = controller.x\n \npyglet.clock.schedule_interval(update, 1/60, controller)\npyglet.app.run()","repo_name":"ivansipiran/CC3501","sub_path":"clase0.py","file_name":"clase0.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"4"} +{"seq_id":"12064215071","text":"import random\nimport re\n\nclass BadwordsFilter(object):\n def __init__(self, filtro, ignore_case=True, reemplazo=\"****\",\n completar=True, palabras=False):\n\n\n self.badwords = filtro\n self.ignore_case = ignore_case\n self.reemplazo = reemplazo\n self.completar = completar\n self.palabras = palabras\n\n def limpiar(self, length):\n\n return ''.join([random.choice(self.reemplazo) for i in\n range(length)])\n\n def replacer(self, igual):\n value = igual.group()\n if self.completar:\n return self.limpiar(len(value))\n else:\n return value[0]+self.limpiar(len(value)-2)+value[-1]\n\n def cambiar(self, texto):\n #Cambia el String desde una mala palabra.\n\n regexp_insidewords = {\n True: r'(%s)',\n False: r'\\b(%s)\\b',\n }\n\n regexp = (regexp_insidewords[self.palabras] %\n '|'.join(self.badwords))\n\n r = re.compile(regexp, re.IGNORECASE if self.ignore_case else 0)\n\n return r.sub(self.replacer, texto)\n\n\n\n'''if __name__ == '__main__':\n\n f = BadwordsFilter(['aguacate', 'gono\\w+','\\w+rr','\\w+norr\\w+'], reemplazo=\"*\")\n texto = 
input('Enter your profanities: ')\r\n\r\n    print (f.cambiar(texto))\r\n\r\n    f.palabras = True\r\n    print (f.cambiar(texto))'''\r\n\r\n\r\n","repo_name":"Juanp0128/Datos2","sub_path":"Badwords.py","file_name":"Badwords.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"31446469529","text":"# DP problem\n# unlike Coin 1 (count the number of ways), here we want the minimum number of coins\n# split the problem into subproblems, as in Coin 1\n# for k won -> build dp for 1..k won; recurrence: to get dp[k], take the minimum of dp[k - coin] over the coins and add 1\n\nimport sys\n\nn, k = map(int, input().split())\ncoins = {int(input()) for _ in range(n)}\ndp = [0 for _ in range(k+1)]\n\nfor i in range(1, k+1):\n    q = sys.maxsize\n    for c in coins:\n        if i-c >= 0:\n            q = min(q, dp[i-c])\n    dp[i] = q + 1\n\nif dp[k] >= sys.maxsize:\n    print(-1)\nelse:\n    print(dp[k])","repo_name":"veryneuron/algorithm_study","sub_path":"baekjoon/algorithms/2294.py","file_name":"2294.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"40969419081","text":"import misc\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras import backend as K\r\nimport numpy as np\r\nimport os\r\n\r\n\r\ndata = misc.getNewRegressionData(\"7\")\r\n\r\n# Keras only admits numpy arrays as input\r\nnpdata = np.array([np.array(x) for x in data])\r\n\r\n# Shuffling training and test data\r\nnp.random.shuffle(npdata)\r\n\r\n# Training set is 80% of total data\r\nxtrain = npdata[:336, :-2]\r\nytrain = npdata[:336, -2:]\r\n\r\n# Test set is the remaining 20%\r\nxtest = npdata[336:, :-2]\r\nytest = npdata[336:, -2:]\r\n\r\nactivation_function = 'relu'\r\n\r\n# 4 layers (different parameters give different results)\r\napproved_models = [f for f in os.listdir(\"./regression_models/tested/approved\")]\r\napproved_min_models = [f for f in os.listdir(\"./regression_models/tested/min_approved\")]\r\nfor model in approved_min_models:\r\n    if model not in approved_models:\r\n        approved_models.append(model)\r\n\r\nmodels = []\r\nfor model in approved_models:\r\n    models.append(misc.getLayersFromModelName(model))\r\n\r\nfor layers in models:\r\n\r\n    model = Sequential()\r\n    if layers[0] != 0: model.add(Dense(layers[0], activation=activation_function, input_dim=4))\r\n    if layers[1] != 0: model.add(Dense(layers[1], activation=activation_function))\r\n    if layers[2] != 0: model.add(Dense(layers[2], activation=activation_function))\r\n    if layers[3] != 0: model.add(Dense(layers[3], activation=activation_function))\r\n    if layers[4] != 0: model.add(Dense(layers[4], activation=activation_function))\r\n    model.add(Dense(2, activation='linear'))\r\n\r\n    model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae'])\r\n\r\n    # Can modify epochs\r\n    model.fit(xtrain, ytrain, epochs=100, batch_size=20, verbose=0)\r\n\r\n    result = model.evaluate(xtest, ytest, batch_size=20)\r\n\r\n    if result[0] < 1.5:\r\n        save_name = 'new_regression_models/pot7/fixed/new_model_relu_{}_{}_{}_{}_{}'.format(layers[0], layers[1],\r\n                                                                                            layers[2], layers[3],\r\n                                                                                            layers[4])\r\n        model.save(save_name)\r\n        print(\"Model\", save_name, \"saved.\")\r\n\r\n    K.clear_session()\r\n\r\n","repo_name":"RFajardoMonzon/Keras_IPS_NN_TFG","sub_path":"new_train_regression.py","file_name":"new_train_regression.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"18608724201","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# x,y coords\nX = 
np.arange(-1.0, 1.0, 0.2)\nY = np.arange(-1.0, 1.0, 0.2)\n\n# Grid\ngrid = np.zeros( (10,10) )\n\n# Weight\nw_inp_mid = np.array([[4.0, 4.0], \n [4.0, 4.0]]) # Middle-layer:2×2\nw_mid_out = np.array([[1.0], \n [-1.0]]) # Output-layer:2×1\n\n# Bias\nb_inp_mid = np.array([3.0, -3.0]) # Middle\nb_mid_out = np.array([0.1]) # Output\n\n# Middle layer\ndef middle_layer(x, w, b):\n u = np.dot(x, w) + b\n return 1/(1+np.exp(-u))\n\n# Output layer\ndef output_layer(x, w, b):\n u = np.dot(x, w) + b\n return u\n\n\n# Calc the neurons in each grid square\nfor j in range(10):\n for i in range(10):\n\n # Forward propagation\n inp = np.array( [X[i], Y[j]] )\n mid = middle_layer(inp, w_inp_mid, b_inp_mid)\n out = output_layer(mid, w_mid_out, b_mid_out)\n\n # Store output of NN in grid\n grid[j][i] = out[0]\n\n # end for\n# end for\n\n# Show grid\nplt.imshow(grid, \"gray\", vmin=0.0, vmax=1.0)\nplt.colorbar()\nplt.show()","repo_name":"tom-uchida/The_First_Deep_Learning","sub_path":"4_Neural_network/4.6_neural_network_implementation/py/4.6_nn_for_regression.py","file_name":"4.6_nn_for_regression.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"37812343621","text":"#!/usr/bin/env python3\n\"\"\"\nModule contains the Cache class\n\"\"\"\nimport redis\nimport uuid\nfrom typing import Union, Callable, Any\nfrom functools import wraps\n\n\ndef count_calls(method: Callable) -> Callable:\n \"\"\"\n Tracks numbe of calls made to a method in a Cache class\n \"\"\"\n @wraps(method)\n def wrapper(self, *args, **kwargs) -> Any:\n \"\"\"\n Returns the given method after incrementing the call counter\n \"\"\"\n\n if isinstance(self._redis, redis.Redis):\n self._redis.incr(method.__qualname__)\n return method(self, *args, **kwargs)\n\n return wrapper\n\n\ndef call_history(method: Callable) -> Callable:\n \"\"\"\n Tracks the call history of a method in a Cache class\n \"\"\"\n @wraps(method)\n def invoker(self, *args, **kwargs) -> Any:\n \"\"\"\n Returns the method's output after storing its inputs &\n output\n \"\"\"\n in_key = '{}:inputs'.format(method.__qualname__)\n out_key = '{}:outputs'.format(method.__qualname__)\n if isinstance(self._redis, redis.Redis):\n self._redis.rpush(in_key, str(args))\n output = method(self, *args, **kwargs)\n if isinstance(self._redis, redis.Redis):\n self._redis.rpush(out_key, output)\n return output\n return invoker\n\n\ndef replay(fn: Callable) -> None:\n \"\"\"\n Displays the call history of a Cache class method\n \"\"\"\n if fn is None or not hasattr(fn, '__self__'):\n return\n redis_store = getattr(fn.__self__, '_redis', None)\n if not isinstance(redis_store, redis.Redis):\n return\n func_name = fn.__qualname__\n in_key = '{}:inputs'.format(func_name)\n out_key = '{}:outputs'.format(func_name)\n func_call_count = 0\n if redis_store.exists(func_name) != 0:\n func_call_count = int(redis_store.get(func_name))\n print('{} was called {} times:'.format(func_name, func_call_count))\n func_inputs = redis_store.lrange(in_key, 0, -1)\n func_outputs = redis_store.lrange(out_key, 0, -1)\n for func_input, func_output in zip(func_inputs, func_outputs):\n print('{}(*{}) -> {}'.format(func_name,\n func_input.decode('utf-8'),\n func_output))\n\n\nclass Cache:\n \"\"\"\n Cache class\n \"\"\"\n def __init__(self) -> None:\n self._redis = redis.Redis()\n self._redis.flushdb(True)\n\n @count_calls\n @call_history\n def store(self, data: Union[str, bytes, int, float]) -> str:\n \"\"\"\n Stores a value 
in Redis and returns a unique key\n \"\"\"\n key = str(uuid.uuid4())\n self._redis.set(key, data)\n return key\n\n def get(self, key: str, fn: Callable = None) ->\\\n Union[str, bytes, int, float]:\n \"\"\"\n Method takes a key string argument and an optional Callable argument\n \"\"\"\n data = self._redis.get(key)\n return fn(data) if fn is not None else data\n\n def get_str(self, key: str) -> str:\n \"\"\"\n Method automatically parameterizes Cache.get with the correct\n conversion function for strings - in this case decode\n \"\"\"\n return self.get(key, lambda x: x.decode('utf-8'))\n\n def get_int(self, key: str) -> int:\n \"\"\"\n Method automatically parameterizes Cache.get with the correct\n conversion function for integers\n \"\"\"\n return self.get(key, lambda x: int(x))\n","repo_name":"cheiy/alx-backend-storage","sub_path":"0x02-redis_basic/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"1350948435","text":"from luxpy.utils import np, sp, np2d, _EPS, asplit\nfrom scipy.special import erf, erfinv\nfrom scipy import stats\nfrom scipy.interpolate import interp1d \n\n__all__ = ['normalize_3x3_matrix','symmM_to_posdefM','check_symmetric',\n 'check_posdef','positive_arctan','line_intersect','erf', 'erfinv', \n 'histogram', 'pol2cart', 'cart2pol', 'spher2cart', 'cart2spher']\n__all__ += ['bvgpdf','mahalanobis2','dot23', 'rms','geomean','polyarea']\n__all__ += ['magnitude_v','angle_v1v2']\n__all__ += ['v_to_cik', 'cik_to_v', 'fmod', 'remove_outliers','fit_ellipse','fit_cov_ellipse']\n__all__ += ['in_hull','interp1_sprague5','interp1', 'ndinterp1','ndinterp1_scipy']\n__all__ += ['box_m','pitman_morgan', 'stress','stress_F_test','mean_distance_weighted']\n\n\n#------------------------------------------------------------------------------\ndef normalize_3x3_matrix(M, xyz0 = np.array([[1.0,1.0,1.0]])):\n \"\"\"\n Normalize 3x3 matrix M to xyz0 -- > [1,1,1]\n \n | If M.shape == (1,9): M is reshaped to (3,3)\n \n Args:\n :M: \n | ndarray((3,3) or ndarray((1,9))\n :xyz0: \n | 2darray, optional \n \n Returns:\n :returns: \n | normalized matrix such that M*xyz0 = [1,1,1]\n \"\"\"\n M = np2d(M)\n if M.shape[-1]==9:\n M = M.reshape(3,3)\n if xyz0.shape[0] == 1:\n return np.dot(np.diagflat(1/(np.dot(M,xyz0.T))),M)\n else:\n return np.concatenate([np.dot(np.diagflat(1/(np.dot(M,xyz0[1].T))),M) for i in range(xyz0.shape[0])],axis=0).reshape(xyz0.shape[0],3,3)\n\n#------------------------------------------------------------------------------\ndef line_intersect(a1, a2, b1, b2):\n \"\"\"\n Line intersections of series of two line segments a and b. \n \n Args:\n :a1: \n | ndarray (.shape = (N,2)) specifying end-point 1 of line a\n :a2: \n | ndarray (.shape = (N,2)) specifying end-point 2 of line a\n :b1: \n | ndarray (.shape = (N,2)) specifying end-point 1 of line b\n :b2: \n | ndarray (.shape = (N,2)) specifying end-point 2 of line b\n \n Note: \n N is the number of line segments a and b.\n \n Returns:\n :returns: \n | ndarray with line-intersections (.shape = (N,2))\n \n References:\n 1. 
https://stackoverflow.com/questions/3252194/numpy-and-line-intersections\n \"\"\"\n T = np.array([[0.0, -1.0], [1.0, 0.0]])\n da = np.atleast_2d(a2 - a1)\n db = np.atleast_2d(b2 - b1)\n dp = np.atleast_2d(a1 - b1)\n dap = np.dot(da, T)\n denom = np.sum(dap * db, axis=1)\n num = np.sum(dap * dp, axis=1)\n return np.atleast_2d(num / denom).T * db + b1\n\n#------------------------------------------------------------------------------\ndef positive_arctan(x,y, htype = 'deg'):\n \"\"\"\n Calculate positive angle (0°-360° or 0 - 2*pi rad.) from x and y.\n \n Args:\n :x: \n | ndarray of x-coordinates\n :y: \n | ndarray of y-coordinates\n :htype:\n | 'deg' or 'rad', optional\n | - 'deg': hue angle between 0° and 360°\n | - 'rad': hue angle between 0 and 2pi radians\n \n Returns:\n :returns:\n | ndarray of positive angles.\n \"\"\"\n if htype == 'deg':\n r2d = 180.0/np.pi\n h360 = 360.0\n else:\n r2d = 1.0\n h360 = 2.0*np.pi\n h = np.atleast_1d((np.arctan2(y,x)*r2d))\n h[np.where(h<0)] = h[np.where(h<0)] + h360\n return h\n\n\n#------------------------------------------------------------------------------\ndef dot23(A,B, keepdims = False):\n \"\"\"\n Dot product of a 2-d ndarray with a (N x K x L) 3-d ndarray \n using einsum().\n \n Args:\n :A: \n | ndarray (.shape = (M,N))\n :B: \n | ndarray (.shape = (N,K,L))\n \n Returns:\n :returns: \n | ndarray (.shape = (M,K,L))\n \"\"\"\n if (len(A.shape)==2) & (len(B.shape)==3):\n dotAB = np.einsum('ij,jkl->ikl',A,B)\n if (len(B.shape)==3) & (keepdims == True):\n dotAB = np.expand_dims(dotAB,axis=1)\n elif (len(A.shape)==2) & (len(B.shape)==2):\n dotAB = np.einsum('ij,jk->ik',A,B)\n if (len(B.shape)==2) & (keepdims == True):\n dotAB = np.expand_dims(dotAB,axis=1)\n \n return dotAB\n\n#------------------------------------------------------------------------------\ndef check_symmetric(A, atol = 1.0e-9, rtol = 1.0e-9):\n \"\"\"\n Check if A is symmetric.\n \n Args:\n :A: \n | ndarray\n :atol:\n | float, optional\n | The absolute tolerance parameter (see Notes of numpy.allclose())\n :rtol:\n | float, optional\n | The relative tolerance parameter (see Notes of numpy.allclose())\n \n Returns:\n :returns:\n | Bool\n | True: the array is symmetric within the given tolerance\n \"\"\"\n return np.allclose(A, A.T, atol = atol, rtol = rtol)\n\n\ndef check_posdef(A, atol = 1.0e-9, rtol = 1.0e-9):\n \"\"\"\n Checks positive definiteness of a matrix via Cholesky.\n \n Args:\n :A: \n | ndarray\n :atol:\n | float, optional\n | The absolute tolerance parameter (see Notes of numpy.allclose())\n :rtol:\n | float, optional\n | The relative tolerance parameter (see Notes of numpy.allclose())\n \n Returns:\n :returns:\n | Bool\n | True: the array is positive-definite within the given tolerance\n\n \"\"\"\n try:\n R = np.linalg.cholesky(A)\n if np.allclose(A, np.dot(R,R.T), atol = atol,rtol = rtol):\n return True\n else:\n return False\n except np.linalg.LinAlgError:\n return False\n\n\ndef symmM_to_posdefM(A = None, atol = 1.0e-9, rtol = 1.0e-9, method = 'make', forcesymm = True):\n \"\"\"\n Convert a symmetric matrix to a positive definite one. 
\n \n Args:\n :A: \n | ndarray\n :atol:\n | float, optional\n | The absolute tolerance parameter (see Notes of numpy.allclose())\n :rtol:\n | float, optional\n | The relative tolerance parameter (see Notes of numpy.allclose())\n :method: \n | 'make' or 'nearest', optional (see notes for more info)\n :forcesymm: \n | True or False, optional\n | If A is not symmetric, force symmetry using: \n | A = numpy.triu(A) + numpy.triu(A).T - numpy.diag(numpy.diag(A))\n \n Returns:\n :returns:\n | ndarray with positive-definite matrix.\n \n Notes on supported methods:\n 1. `'make': A Python/Numpy port of Muhammad Asim Mubeen's matlab function \n Spd_Mat.m \n `_\n 2. `'nearest': A Python/Numpy port of John D'Errico's `nearestSPD` \n MATLAB code. \n `_\n \"\"\"\n if A is not None:\n A = np2d(A)\n \n \n # Make sure matrix A is symmetric up to a certain tolerance:\n sn = check_symmetric(A, atol = atol, rtol = rtol) \n if ((A.shape[0] != A.shape[1]) | (sn != True)):\n if (forcesymm == True) & (A.shape[0] == A.shape[1]):\n A = np.triu(A) + np.triu(A).T - np.diag(np.diag(A))\n else:\n raise Exception('symmM_to_posdefM(): matrix A not symmetric.')\n \n \n if check_posdef(A, atol = atol, rtol = rtol) == True:\n return A\n else:\n\n if method == 'make':\n\n # A Python/Numpy port of Muhammad Asim Mubeen's matlab function Spd_Mat.m\n #\n # See: https://nl.mathworks.com/matlabcentral/fileexchange/45873-positive-definite-matrix\n Val, Vec = np.linalg.eig(A) \n Val = np.real(Val)\n Vec = np.real(Vec)\n Val[np.where(Val==0)] = _EPS #making zero eigenvalues non-zero\n p = np.where(Val<0)\n Val[p] = -Val[p] #making negative eigenvalues positive\n return np.dot(Vec,np.dot(np.diag(Val) , Vec.T))\n \n \n elif method == 'nearest':\n \n # A Python/Numpy port of John D'Errico's `nearestSPD` MATLAB code [1], which\n # credits [2].\n #\n # [1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd\n #\n # [2] N.J. Higham, \"Computing a nearest symmetric positive semidefinite\n # matrix\" (1988): https://doi.org/10.1016/0024-3795(88)90223-6\n #\n # See: https://stackoverflow.com/questions/43238173/python-convert-matrix-to-positive-semi-definite\n \n B = (A + A.T) / 2.0\n _, s, V = np.linalg.svd(B)\n\n H = np.dot(V.T, np.dot(np.diag(s), V))\n\n A2 = (B + H) / 2.0\n\n A3 = (A2 + A2.T) / 2.0\n\n if check_posdef(A3, atol = atol, rtol = rtol) == True:\n return A3\n\n spacing = np.spacing(np.linalg.norm(A))\n I = np.eye(A.shape[0])\n k = 1\n while not check_posdef(A3, atol = atol, rtol = rtol):\n mineig = np.min(np.real(np.linalg.eigvals(A3)))\n A3 += I * (-mineig * k**2.0+ spacing)\n k += 1\n\n return A3\n\n\n#-----------------------------------------------------------------------------\ndef bvgpdf(x, y = None, mu = None, sigmainv = None):\n \"\"\"\n Evaluate bivariate Gaussian probability density function (BVGPDF)\n \n Args:\n :x: \n | scalar or list or ndarray (.ndim = 1 or 2) with \n | x(y)-coordinates at which to evaluate bivariate Gaussian PD.\n :y: \n | None or scalar or list or ndarray (.ndim = 1) with \n | y-coordinates at which to evaluate bivariate Gaussian PD, optional.\n | If :y: is None, :x: should be a 2d array.\n :mu: \n | None or ndarray (.ndim = 2) with center coordinates of \n | bivariate Gaussian PD, optional. 
\n | None defaults to ndarray([0,0]).\n :sigmainv:\n | None or ndarray with 'inverse covariance matrix', optional \n | Determines the shape and orientation of the PD.\n | None default to numpy.eye(2).\n \n Returns:\n :returns:\n | ndarray with magnitude of BVGPDF(x,y) \n \n \"\"\"\n return np.exp(-0.5*mahalanobis2(x, y = y, mu = mu, sigmainv = sigmainv))\n\n#------------------------------------------------------------------------------\ndef mahalanobis2(x, y = None, z = None, mu = None, sigmainv = None):\n \"\"\"\n Evaluate the squared mahalanobis distance\n \n Args: \n :x: \n | scalar or list or ndarray (.ndim = 1 or 2) with x(y)-coordinates \n at which to evaluate the mahalanobis distance squared.\n :y: \n | None or scalar or list or ndarray (.ndim = 1) with y-coordinates \n at which to evaluate the mahalanobis distance squared, optional.\n | If :y: is None, :x: should be a 2d array.\n :z: \n | None or scalar or list or ndarray (.ndim = 1) with z-coordinates \n at which to evaluate the mahalanobis distance squared, optional.\n | If :z: is None & :y: is None, then :x: should be a 2d array.\n :mu: \n | None or ndarray (.ndim = 1) with center coordinates of the \n mahalanobis ellipse, optional. \n | None defaults to zeros(2) or zeros(3).\n :sigmainv:\n | None or ndarray with 'inverse covariance matrix', optional \n | Determines the shape and orientation of the PD.\n | None default to np.eye(2) or eye(3).\n Returns:\n :returns: \n | ndarray with magnitude of mahalanobis2(x,y[,z])\n\n \"\"\"\n if (y is None) & (z is None):\n p = x.shape[-1]\n elif (z is None):\n p = x.shape[-1] if (y is None) else 2\n elif (z is not None):\n p = 3 if (y is not None) else 2\n \n if mu is None:\n mu = np.zeros(p)\n if sigmainv is None:\n sigmainv = np.eye(p)\n \n x = np2d(x)\n mu = np2d(mu)\n\n if (y is None) & (z is None):\n x = x - mu\n if p == 2:\n x, y = asplit(x)\n elif p==3:\n x, y, z = asplit(x)\n elif (z is None):\n if y is None:\n x = x - mu\n x, y = asplit(x)\n else:\n x = x - mu[...,0] # center data on mu \n y = np2d(y) - mu[...,1] # center data on mu \n elif (z is not None):\n if (y is not None):\n x = x - mu[0] # center data on mu \n y = np2d(y) - mu[...,1] # center data on mu \n z = np2d(z) - mu[...,2] # center data on mu \n else:\n x = x - mu[...,0] # center data on mu \n y = np2d(z) - mu[...,1] # center data on mu \n \n if p == 2:\n return (sigmainv[0,0] * (x**2.0) + sigmainv[1,1] * (y**2.0) + 2.0*sigmainv[0,1]*(x*y))\n else:\n return (sigmainv[0,0] * (x**2.0) + sigmainv[1,1] * (y**2.0) + 2.0*sigmainv[0,1]*(x*y) + \n sigmainv[2,2] * (z**2.0) + 2.0*sigmainv[0,2]*(x*z) + 2.0*sigmainv[1,2]*(y*z))\n\n\n\n\n#------------------------------------------------------------------------------\ndef rms(data,axis = 0, keepdims = False):\n \"\"\"\n Calculate root-mean-square along axis.\n \n Args:\n :data: \n | list of values or ndarray\n :axis:\n | 0, optional\n | Axis along which to calculate rms.\n :keepdims:\n | False or True, optional\n | Keep original dimensions of array.\n \n Returns:\n :returns:\n | ndarray with rms values.\n \"\"\"\n data = np2d(data)\n return np.sqrt(np.power(data,2).mean(axis=axis, keepdims = keepdims))\n\n#-----------------------------------------------------------------------------\ndef geomean(data, axis = 0, keepdims = False):\n \"\"\"\n Calculate geometric mean along axis.\n \n Args:\n :data:\n | list of values or ndarray\n :axis:\n | 0, optional\n | Axis along which to calculate geomean.\n :keepdims:\n | False or True, optional\n | Keep original dimensions of array.\n \n 
Returns:\n        :returns:\n            | ndarray with geomean values. \n    \"\"\"\n    data = np2d(data)\n    return np.power(data.prod(axis=axis, keepdims = keepdims),1/data.shape[axis])\n \n#------------------------------------------------------------------------------\ndef polyarea(x,y):\n    \"\"\"\n    Calculates area of polygon. \n    \n    | First coordinate should also be last.\n    \n    Args:\n        :x: \n            | ndarray of x-coordinates of polygon vertices.\n        :y: \n            | ndarray of y-coordinates of polygon vertices.  \n        \n    Returns:\n        :returns:\n            | float (area of polygon)\n    \n    \"\"\"\n    return 0.5*np.abs(np.dot(x,np.roll(y,1).T)-np.dot(y,np.roll(x,1).T))\n\n#------------------------------------------------------------------------------\ndef cart2pol(x,y = None, htype = 'deg'):\n    \"\"\"\n    Convert Cartesian to polar coordinates.\n    \n    Args:\n        :x: \n            | float or ndarray with x-coordinates\n        :y: \n            | None or float or ndarray with y-coordinates, optional\n            | If None, y-coordinates are assumed to be in :x:.\n        :htype:\n            | 'deg' or 'rad', optional\n            | Output type of theta.\n    \n    Returns:\n        :returns: \n            | (float or ndarray of theta, float or ndarray of r) values\n    \"\"\"\n    if y is None:\n        y = x[...,1].copy()\n        x = x[...,0].copy()\n    return positive_arctan(x,y, htype = htype), np.sqrt(x**2 + y**2)\n\ndef pol2cart(theta, r = None, htype = 'deg'):\n    \"\"\"\n    Convert polar to Cartesian coordinates.\n    \n    Args:\n        :theta: \n            | float or ndarray with theta-coordinates\n        :r: \n            | None or float or ndarray with r-coordinates, optional\n            | If None, r-coordinates are assumed to be in :theta:.\n        :htype:\n            | 'deg' or 'rad', optional\n            | Input type of :theta:.\n    \n    Returns:\n        :returns:\n            | (float or ndarray of x, float or ndarray of y) coordinates \n    \"\"\"\n    if htype == 'deg':\n        d2r = np.pi/180.0\n    else:\n        d2r = 1.0\n    if r is None:\n        r = theta[...,1].copy()\n        theta = theta[...,0].copy()\n    theta = theta*d2r\n    return r*np.cos(theta), r*np.sin(theta)\n\n#------------------------------------------------------------------------------\ndef spher2cart(theta, phi, r = 1., deg = True):\n    \"\"\"\n    Convert spherical to Cartesian coordinates.\n    \n    Args:\n        :theta:\n            | Float, int or ndarray\n            | Angle with positive z-axis.\n        :phi:\n            | Float, int or ndarray\n            | Angle around positive z-axis starting from x-axis.\n        :r:\n            | 1, optional\n            | Float, int or ndarray\n            | radius\n    \n    Returns:\n        :x, y, z:\n            | tuple of floats, ints or ndarrays\n            | Cartesian coordinates\n    \"\"\"\n    if deg == True:\n        theta = np.deg2rad(theta)\n        phi = np.deg2rad(phi)\n    x= r*np.sin(theta)*np.cos(phi)\n    y= r*np.sin(theta)*np.sin(phi)\n    z= r*np.cos(theta)\n    return x,y,z\n\ndef cart2spher(x,y,z, deg = True):\n    \"\"\"\n    Convert Cartesian to spherical coordinates.\n    \n    Args: \n        :x, y, z:\n            | tuple of floats, ints or ndarrays\n            | Cartesian coordinates\n    Returns:\n        :theta:\n            | Float, int or ndarray\n            | Angle with positive z-axis.\n        :phi:\n            | Float, int or ndarray\n            | Angle around positive z-axis starting from x-axis.\n        :r:\n            | 1, optional\n            | Float, int or ndarray\n            | radius\n\n    \"\"\"\n    r = np.sqrt(x*x + y*y + z*z)\n    phi = np.arctan2(y,x)\n    phi[phi<0.] = phi[phi<0.] + 2*np.pi\n    zdr = z/r\n    zdr[zdr > 1.] = 1.\n    zdr[zdr<-1.] 
#------------------------------------------------------------------------------\ndef spher2cart(theta, phi, r = 1., deg = True):\n    \"\"\"\n    Convert spherical to cartesian coordinates.\n    \n    Args:\n        :theta:\n            | Float, int or ndarray\n            | Angle with positive z-axis.\n        :phi:\n            | Float, int or ndarray\n            | Angle around positive z-axis starting from x-axis.\n        :r:\n            | 1, optional\n            | Float, int or ndarray\n            | radius\n    \n    Returns:\n        :x, y, z:\n            | tuple of floats, ints or ndarrays\n            | Cartesian coordinates\n    \"\"\"\n    if deg == True:\n        theta = np.deg2rad(theta)\n        phi = np.deg2rad(phi)\n    x = r*np.sin(theta)*np.cos(phi)\n    y = r*np.sin(theta)*np.sin(phi)\n    z = r*np.cos(theta)\n    return x,y,z\n\ndef cart2spher(x,y,z, deg = True):\n    \"\"\"\n    Convert cartesian to spherical coordinates.\n    \n    Args: \n        :x, y, z:\n            | tuple of floats, ints or ndarrays\n            | Cartesian coordinates\n    Returns:\n        :theta:\n            | Float, int or ndarray\n            | Angle with positive z-axis.\n        :phi:\n            | Float, int or ndarray\n            | Angle around positive z-axis starting from x-axis.\n        :r:\n            | Float, int or ndarray\n            | radius\n\n    \"\"\"\n    r = np.sqrt(x*x + y*y + z*z)\n    phi = np.arctan2(y,x)\n    phi[phi<0.] = phi[phi<0.] + 2*np.pi\n    zdr = z/r\n    zdr[zdr > 1.] = 1.\n    zdr[zdr < -1.] = -1.\n    theta = np.arccos(zdr)\n    if deg == True:\n        theta = theta*180/np.pi\n        phi = phi*180/np.pi\n    return theta, phi, r \n\n\n#------------------------------------------------------------------------------\n# magnitude of a vector\ndef magnitude_v(v):\n    \"\"\"\n    Calculates magnitude of vector.\n    \n    Args:\n        :v: \n            | ndarray with vector\n    \n    Returns:\n        :magnitude:\n            | ndarray \n    \"\"\"\n    magnitude = np.sqrt(v[:,0]**2 + v[:,1]**2)\n    return magnitude\n\n\n# angle between vectors\ndef angle_v1v2(v1,v2,htype = 'deg'):\n    \"\"\"\n    Calculates angle between two vectors.\n    \n    Args:\n        :v1: \n            | ndarray with vector 1\n        :v2: \n            | ndarray with vector 2\n        :htype:\n            | 'deg' or 'rad', optional\n            | Requested angle type.\n    \n    Returns:\n        :ang: \n            | ndarray \n    \"\"\"\n    denom = magnitude_v(v1)*magnitude_v(v2)\n    denom[denom==0.] = np.nan\n    ang = np.arccos(np.sum(v1*v2,axis=1)/denom)\n    if htype == 'deg':\n        ang = ang*180/np.pi\n    return ang\n \n#------------------------------------------------------------------------------\ndef histogram(a, bins=10, bin_center = False, range=None, weights=None, density=False):\n    \"\"\"\n    Histogram function that can take as bins either the center (cfr. matlab hist) or bin-edges.\n    \n    Args: \n        :bin_center:\n            | False, optional\n            | False: if :bins: int, str or sequence of scalars:\n            |       default to numpy.histogram (uses bin edges).\n            | True: if :bins: is a sequence of scalars:\n            |       bins (containing centers) are transformed to edges\n            |       and numpy.histogram is run. \n            |       Mimics matlab hist (uses bin centers).\n    \n    Note:\n        For other arguments and output, see ?numpy.histogram\n    \n    Returns:\n        :returns:\n            | ndarray with histogram\n    \"\"\"\n    if (isinstance(bins, list) | isinstance(bins, np.ndarray)) & (bin_center == True):\n        if len(bins) == 1:\n            edges = np.hstack((bins[0],np.inf))\n        else:\n            centers = bins\n            d = np.diff(centers)/2\n            edges = np.hstack((centers[0]-d[0], centers[:-1] + d, centers[-1] + d[-1]))\n            edges[1:] = edges[1:] + np.finfo(float).eps\n        return np.histogram(a, bins=edges, range=range, weights=weights, density=density)\n\n    else:\n        return np.histogram(a, bins=bins, range=range, weights=weights, density=density)\n\n#------------------------------------------------------------------------------\ndef v_to_cik(v, inverse = False):\n    \"\"\"\n    Calculate 2x2 '(covariance matrix)^-1' elements cik \n    \n    Args:\n        :v: \n            | (Nx5) np.ndarray\n            | ellipse parameters [Rmax,Rmin,xc,yc,theta]\n        :inverse:\n            | If True: return inverse of cik.\n    \n    Returns:\n        :cik: \n            | 'Nx2x2' (covariance matrix)^-1\n    \n    Notes:\n        | cik is not actually a covariance matrix,\n        | only for a Gaussian or normal distribution!\n\n    \"\"\"\n    v = np.atleast_2d(v)\n    g11 = (1/v[:,0]*np.cos(v[:,4]))**2 + (1/v[:,1]*np.sin(v[:,4]))**2\n    g22 = (1/v[:,0]*np.sin(v[:,4]))**2 + (1/v[:,1]*np.cos(v[:,4]))**2\n    g12 = (1/v[:,0]**2 - 1/v[:,1]**2)*np.sin(v[:,4])*np.cos(v[:,4])\n    cik = np.zeros((g11.shape[0],2,2))\n\n    for i in range(g11.shape[0]):\n        cik[i,:,:] = np.vstack((np.hstack((g11[i],g12[i])), np.hstack((g12[i],g22[i]))))\n        if inverse == True:\n            cik[i,:,:] = np.linalg.inv(cik[i,:,:])\n    return cik\n#------------------------------------------------------------------------------\n\ndef cik_to_v(cik, xyc = None, inverse = False):\n    \"\"\"\n    Calculate v-format ellipse descriptor from 2x2 'covariance matrix'^-1 cik \n    \n    Args:\n        :cik: \n            | 'Nx2x2' (covariance matrix)^-1\n        :inverse:\n            | If True: input is inverse of cik.\n    \n    \n    Returns:\n        :v: \n            | (Nx5) np.ndarray\n            | ellipse parameters [Rmax,Rmin,xc,yc,theta]\n\n    Notes:\n        | cik is not actually the 
inverse covariance matrix,\n | only for a Gaussian or normal distribution!\n\n \"\"\"\n if cik.ndim < 3:\n cik = cik[None,...]\n \n if inverse == True:\n for i in range(cik.shape[0]):\n cik[i,:,:] = np.linalg.inv(cik[i,:,:])\n \n g11 = cik[:,0,0]\n g22 = cik[:,1,1] \n g12 = cik[:,0,1]\n\n theta = 0.5*np.arctan2(2*g12,(g11-g22)) + (np.pi/2)*(g12<0)\n #theta = theta2 + (np.pi/2)*(g12<0)\n #theta2 = theta\n cottheta = np.cos(theta)/np.sin(theta) #np.cot(theta)\n cottheta[np.isinf(cottheta)] = 0\n\n a = 1/np.sqrt((g22 + g12*cottheta))\n b = 1/np.sqrt((g11 - g12*cottheta))\n\n # ensure largest ellipse axis is first (correct angle):\n c = b>a; a[c], b[c], theta[c] = b[c],a[c],theta[c]+np.pi/2\n\n v = np.vstack((a, b, np.zeros(a.shape), np.zeros(a.shape), theta)).T\n \n # add center coordinates:\n if xyc is not None:\n v[:,2:4] = xyc\n \n return v\n\ndef fmod(x, y):\n \"\"\"\n Floating point modulus \n \n | e.g., fmod(theta, np.pi * 2) would keep an angle in [0, 2pi]\n\n Args:\n :x:\n | angle to restrict\n :y: \n | end of interval [0, y] to restrict to\n \n Returns:\n :r: floating point modulus\n \"\"\"\n r = x\n while(r < 0):\n r = r + y\n while(r > y):\n r = r - y\n return r\n\ndef remove_outliers(data, alpha = 0.01):\n \"\"\"\n Remove multivariate outliers from data when outside of alpha-level confidence ellipsoid.\n \n Args:\n :data:\n | Nxp ndarray with multivariate data (N samples, p variables)\n :alpha:\n | 0.01, optional\n | Significance level of confidence ellipsoid marking the boundary for outliers.\n \n Return:\n :data:\n | (N-... x p) ndarray with multivariate data; outliers removed.\n \"\"\"\n # delete outliers: \n datac = data.mean(axis=0)\n cov_ = np.cov(data.T)\n f = stats.chi2.ppf(1-alpha, data.shape[1])\n D = mahalanobis2(data, mu = datac, sigmainv = np.linalg.inv(cov_)/f)**0.5\n datan = data.copy()\n datan = datan[D<=1]\n return datan\n\ndef fit_ellipse(xy, center_on_mean_xy = False):\n \"\"\"\n Fit an ellipse to supplied data points.\n\n Args:\n :xy: \n | coordinates of points to fit (Nx2 array)\n :center_on_mean_xy:\n | False, optional\n | Center ellipse on mean of xy \n | (otherwise it might be offset due to solving \n | the contrained minization problem: aT*S*a, see ref below.)\n \n Returns:\n :v:\n | vector with ellipse parameters [Rmax,Rmin, xc,yc, theta (rad.)]\n \n Reference:\n 1. Fitzgibbon, A.W., Pilu, M., and Fischer R.B., \n Direct least squares fitting of ellipsees, \n Proc. 
of the 13th Internation Conference on Pattern Recognition, \n pp 253–257, Vienna, 1996.\n \"\"\"\n # remove centroid:\n# center = xy.mean(axis=0)\n# xy = xy - center\n \n # Fit ellipse:\n x, y = xy[:,0:1], xy[:,1:2]\n D = np.hstack((x * x, x * y, y * y, x, y, np.ones_like(x)))\n S, C = np.dot(D.T, D), np.zeros([6, 6])\n C[0, 2], C[2, 0], C[1, 1] = 2, 2, -1\n U, s, V = np.linalg.svd(np.dot(np.linalg.pinv(S), C))\n e = U[:, 0]\n# E, V = np.linalg.eig(np.dot(np.linalg.inv(S), C))\n# n = np.argmax(np.abs(E))\n# e = V[:,n]\n \n # get ellipse axis lengths, center and orientation:\n b, c, d, f, g, a = e[1] / 2, e[2], e[3] / 2, e[4] / 2, e[5], e[0]\n \n # get ellipse center:\n num = b * b - a * c\n if num == 0:\n xc = 0\n yc = 0\n else:\n xc = ((c * d - b * f) / num) \n yc = ((a * f - b * d) / num) \n \n # get ellipse orientation:\n theta = np.arctan2(np.array(2 * b), np.array((a - c))) / 2\n# if b == 0:\n# if a > c:\n# theta = 0\n# else:\n# theta = np.pi/2\n# else:\n# if a > c:\n# theta = np.arctan2(2*b,(a-c))/2\n# else:\n# theta = np.arctan2(2*b,(a-c))/2 + np.pi/2\n \n # axis lengths:\n up = 2 * (a * f * f + c * d * d + g * b * b - 2 * b * d * f - a * c * g)\n down1 = (b * b - a * c) * ((c - a) * np.sqrt(1 + 4 * b * b / ((a - c) * (a - c))) - (c + a))\n down2 = (b * b - a * c) * ((a - c) * np.sqrt(1 + 4 * b * b / ((a - c) * (a - c))) - (c + a))\n a, b = np.sqrt(np.abs(up / down1)), np.sqrt(np.abs(up / down2))\n\n\n # assert that a is the major axis (otherwise swap and correct angle)\n if(b > a):\n b, a = a, b\n # ensure the angle is betwen 0 and 2*pi\n theta = fmod(theta, 2.0 * np.pi)\n \n if center_on_mean_xy == True:\n xc,yc = xy.mean(axis=0)\n\n return np.hstack((a, b, xc, yc, theta))\n\n\ndef fit_cov_ellipse(xy, alpha = 0.05, pdf = 'chi2', SE = False, \n robust = False, robust_alpha = 0.01):\n \"\"\"\n Fit covariance ellipse to xy data.\n \n Args:\n :xy: \n | coordinates of points to fit (Nx2 array)\n :alpha:\n | 0.05, optional\n | alpha significance level \n | (e.g alpha = 0.05 for 95% confidence ellipse)\n :pdf:\n | chi2, optional\n | - 'chi2': Rescale using Chi2-distribution\n | - 't': Rescale using Student t-distribution\n | - 'norm': Rescale using normal-distribution\n | - None: don't rescale using pdf, use alpha as scalefactor (cfr. 
alpha* 1SD or alpha * 1SE)\n :SE:\n | False, optional\n | If false, fit standard error ellipse at alpha significance level\n | If true, fit standard deviation ellipse at alpha significance level\n :robust:\n | False, optional\n | If True: remove outliers beyond the confidence ellipsoid before calculating\n | the covariances.\n :robust_alpha:\n | 0.01, optional\n | Significance level of confidence ellipsoid marking the boundary for outliers.\n \n Returns:\n :v:\n | vector with ellipse parameters [Rmax,Rmin, xc,yc, theta (rad.)]\n \"\"\"\n\n # delete outliers: \n if robust == True:\n xy = remove_outliers(xy, alpha = robust_alpha)\n \n xyc = xy.mean(axis=0)\n cov_ = np.cov(xy.T)\n \n \n cik = np.linalg.inv(cov_)\n \n if pdf == 'chi2':\n f = stats.chi2.ppf(1-alpha, xy.shape[1])\n elif pdf == 't':\n f = stats.t.ppf(1-alpha, xy.shape[0]-1)\n elif pdf =='norm':\n f = stats.norm.ppf(1-alpha)\n elif pdf == 'sample':\n p = xy.shape[1]\n n = xy.shape[0]\n f = (p*(n-1)/(n-p)*stats.f.ppf(1-alpha,p,n-p))\n else:\n f = alpha # -> fraction of Mahalanobis distance\n \n if SE == True:\n f = f/xy.shape[0]\n \n v = cik_to_v(cik/f, xyc=xyc)\n return v\n\n#------------------------------------------------------------------------------\ndef in_hull(p, hull):\n \"\"\"\n Test if points in `p` are in `hull`\n\n Args:\n :p: \n | NxK coordinates of N points in K dimensions\n :hull:\n | Either a scipy.spatial.Delaunay object or the MxK array of the \n | coordinates of M points in K dimensions for which Delaunay \n | triangulation will be computed\n \n Returns:\n :bool:\n | boolean ndarray with True for in-gamut and False for out-of-gamut points\n \"\"\"\n if not isinstance(hull,sp.spatial.Delaunay):\n hull = sp.spatial.Delaunay(hull)\n return hull.find_simplex(p)>=0\n\n#------------------------------------------------------------------------------\n_SPRAGUE_COEFFICIENTS = np.array([\n [884, -1960, 3033, -2648, 1080, -180],\n [508, -540, 488, -367, 144, -24],\n [-24, 144, -367, 488, -540, 508],\n [-180, 1080, -2648, 3033, -1960, 884],\n ]).T / 209.0\ndef interp1_sprague5(x, y, xn, extrap = (np.nan, np.nan)):\n \"\"\" \n Perform a 1-dimensional 5th order Sprague interpolation.\n \n Args:\n :x:\n | ndarray with n-dimensional coordinates.\n :y: \n | ndarray with values at coordinates in x.\n :xn:\n | ndarray of new coordinates.\n :extrap:\n | (np.nan, np.nan) or string, optional\n | If tuple: fill with values in tuple (x[-1])\n | If string: ('zeros','linear', 'nearest', 'nearest-up', 'zero', 'slinear', 'quadratic', 'cubic', 'previous','next')\n | for more info on the other options see: scipy.interpolate.interp1d?\n Returns:\n :yn:\n | ndarray with values at new coordinates in xn.\n \"\"\"\n # Do extrapolation:\n if ((xnx[-1])).any(): # extrapolation needed !\n if isinstance(extrap,tuple):\n if extrap[0] == extrap[1]: \n yne = np.ones((y.shape[0],len(xn)))*extrap[0]\n else:\n yne = np.zeros((y.shape[0],len(xn)))\n yne[:,(xnx[-1])] = extrap[1]\n elif isinstance(extrap,str):\n yne = interp1d(x, y, kind = extrap, bounds_error = False, fill_value = 'extrapolate')(xn)\n else:\n raise Exception('Invalid option for extrap argument. 
Only tuple and string allowed.')\n xn_x = xn[(xn>=x[0]) & (xn<=x[-1])]\n else:\n xn_x = xn\n yne = None\n \n # Check equal x-spacing:\n dx = np.diff(x)\n if np.all(dx == dx[0]):\n dx = dx[0] \n else:\n raise Exception('Elements in x are not equally spaced!')\n \n # Extrapolate x, y with required additional elements for Sprague to work:\n xe = np.hstack((x[0] - 2*dx, x[0] - dx, x, x[-1] + dx, x[-1] + 2*dx))\n \n y = np.atleast_2d(y)\n ye1 = (y[:, :6] @ _SPRAGUE_COEFFICIENTS[:,0])[:,None]\n ye2 = (y[:, :6] @ _SPRAGUE_COEFFICIENTS[:,1])[:,None]\n ye3 = (y[:,-6:] @ _SPRAGUE_COEFFICIENTS[:,2])[:,None]\n ye4 = (y[:,-6:] @ _SPRAGUE_COEFFICIENTS[:,3])[:,None]\n ye = np.hstack((ye1,ye2,y,ye3,ye4)).T\n \n \n # Evaluate at xn_x (no extrapolation!!):\n i = np.searchsorted(xe, xn_x) - 1\n X = np.atleast_2d((xn_x - xe[i]) / (xe[i + 1] - xe[i])).T\n\n a0 = ye[i]\n a1 = ((2 * ye[i - 2] - 16 * ye[i - 1] + 16 * ye[i + 1] - 2 * ye[i + 2]) / 24) \n a2 = ((-ye[i - 2] + 16 * ye[i - 1] - 30 * ye[i] + 16 * ye[i + 1] - ye[i + 2]) / 24) \n a3 = ((-9 * ye[i - 2] + 39 * ye[i - 1] - 70 * ye[i] + 66 * ye[i + 1] - 33 * ye[i + 2] + 7 * ye[i + 3]) / 24)\n a4 = ((13 * ye[i - 2] - 64 * ye[i - 1] + 126 * ye[i] - 124 * ye[i + 1] + 61 * ye[i + 2] - 12 * ye[i + 3]) / 24)\n a5 = ((-5 * ye[i - 2] + 25 * ye[i - 1] - 50 * ye[i] + 50 * ye[i + 1] - 25 * ye[i + 2] + 5 * ye[i + 3]) / 24)\n\n yn = (a0 + a1*X + a2*X**2 + a3*X**3 + a4*X**4 + a5*X**5).T\n \n if yne is None:\n return yn\n else:\n yne[:,(xn>=x[0]) & (xn<=x[-1])] = yn\n return yne\n\n#------------------------------------------------------------------------------\ndef interp1(X,Y,Xnew, kind = 'linear', ext = 'extrapolate', w = None, bbox=[None, None], check_finite = False):\n \"\"\"\n Perform a 1-dimensional linear interpolation (wrapper around scipy.interpolate.InterpolatedUnivariateSpline).\n \n Args:\n :X: \n | ndarray with n-dimensional coordinates (last axis represents dimension)\n :Y: \n | ndarray with values at coordinates in X\n :Xnew: \n | ndarray of new coordinates (last axis represents dimension)\n :kind:\n | str or int, optional\n | if str: kind is 'translated' to an int value for input to scipy.interpolate.InterpolatedUnivariateSpline()\n | supported options for str: 'linear', 'quadratic', 'cubic', 'quartic', 'quintic'\n :other args:\n | see scipy.interpolate.InterpolatedUnivariateSpline()\n \n Returns:\n :Ynew:\n | ndarray with new values at coordinates in Xnew\n \"\"\"\n k = ['linear', 'quadratic', 'cubic', 'quartic', 'quintic'].index(kind) + 1\n if ext == 'nearest': ext = 'const'\n return sp.interpolate.InterpolatedUnivariateSpline(X,Y, ext = ext, k = k, w = w, bbox = bbox, check_finite = check_finite)(Xnew)\n#------------------------------------------------------------------------------\ndef ndinterp1_scipy(X,Y,Xnew, fill_value = np.nan, rescale = False): \n \"\"\"\n Perform a n-dimensional linear interpolation (wrapper around scipy.interpolate.LinearNDInterpolator).\n \n Args:\n :X: \n | ndarray with n-dimensional coordinates (last axis represents dimension)\n :Y: \n | ndarray with values at coordinates in X\n :Xnew: \n | ndarray of new coordinates (last axis represents dimension)\n :fill_value: \n | float, optional\n | Value used to fill in for requested points outside of the\n | convex hull of the input points. 
If not provided, then\n | the default is ``nan``.\n :rescale:\n | bool, optional\n | Rescale points to unit cube before performing interpolation.\n | This is useful if some of the input dimensions have\n | incommensurable units and differ by many orders of magnitude.\n \n Returns:\n :Ynew:\n | ndarray with new values at coordinates in Xnew\n \"\"\"\n return sp.interpolate.LinearNDInterpolator(X,Y, fill_value = fill_value, rescale = rescale).__call__(Xnew)\n\ndef ndinterp1(X, Y, Xnew):\n \"\"\"\n Perform nd-dimensional linear interpolation using Delaunay triangulation.\n \n Args:\n :X: \n | ndarray with n-dimensional coordinates (last axis represents dimension).\n :Y: \n | ndarray with values at coordinates in X.\n :Xnew: \n | ndarray of new coordinates (last axis represents dimension).\n | When outside of the convex hull of X, then a best estimate is \n | given based on the closest vertices.\n \n Returns:\n :Ynew:\n | ndarray with new values at coordinates in Xnew.\n \"\"\"\n #get dimensions:\n n = Xnew.shape[-1]\n # create an object with triangulation\n tri = sp.spatial.Delaunay(X) \n # find simplexes that contain interpolated points\n s = tri.find_simplex(Xnew)\n # get the vertices for each simplex\n v = tri.vertices[s]\n # get transform matrices for each simplex (see explanation bellow)\n m = tri.transform[s]\n # for each interpolated point p, mutliply the transform matrix by \n # vector p-r, where r=m[:,n,:] is one of the simplex vertices to which \n # the matrix m is related to (again, see below)\n b = np.einsum('ijk,ik->ij', m[:,:n,:n], Xnew-m[:,n,:])\n \n # get the weights for the vertices; `b` contains an n-dimensional vector\n # with weights for all but the last vertices of the simplex\n # (note that for n-D grid, each simplex consists of n+1 vertices);\n # the remaining weight for the last vertex can be copmuted from\n # the condition that sum of weights must be equal to 1\n w = np.c_[b, 1-b.sum(axis=1)]\n \n # normalize weigths:\n w = w/w.sum(axis=1, keepdims=True)\n \n # interpolate:\n if Y[v].ndim == 3:\n Ynew = np.einsum('ijk,ij->ik', Y[v], w)\n else:\n Ynew = np.einsum('ij,ij->i', Y[v], w)\n \n return Ynew\n\ndef box_m(*X, ni = None, verbosity = 0, robust = False, robust_alpha = 0.01):\n \"\"\"\n Perform Box's M test (p>=2) to check equality of covariance matrices or Bartlett's test (p==1) for equality of variances.\n \n Args:\n :X: \n | A number (k groups) or list of 2d-ndarrays (rows: samples, cols: variables) with data.\n | or a number of 2d-ndarrays with covariance matrices (supply ni!)\n :ni:\n | None, optional\n | If None: X contains data, else, X contains covariance matrices.\n :verbosity: \n | 0, optional\n | If 1: print results.\n :robust:\n | False, optional\n | If True: remove outliers beyond the confidence ellipsoid before calculating\n | the covariances.\n :robust_alpha:\n | 0.01, optional\n | Significance level of confidence ellipsoid marking the boundary for outliers.\n \n Returns:\n :statistic:\n | F or chi2 value (see len(dfs))\n :pval:\n | p-value\n :df:\n | degrees of freedom.\n | if len(dfs) == 2: F-test was used.\n | if len(dfs) == 1: chi2 approx. was used.\n \n Notes:\n 1. If p==1: Reduces to Bartlett's test for equal variances.\n 2. 
If (ni>20).all() & (p<6) & (k<6): then a more appropriate chi2 test is used in a some cases.\n \"\"\"\n\n k = len(X) # groups\n p = np.atleast_2d(X[0]).shape[1] # variables\n if p == 1: # for p == 1: only variance!\n det = lambda x: np.array(x)\n else:\n det = lambda x: np.linalg.det(x)\n if ni is None: # samples in each group\n \n # remove outliers before calculation of box M:\n if robust == True:\n X = [remove_outliers(Xi, alpha = robust_alpha) for Xi in X]\n \n ni = np.array([Xi.shape[0] for Xi in X])\n Si = np.array([np.cov(Xi.T) for Xi in X])\n if p == 1:\n Si = np.atleast_2d(Si).T\n else:\n Si = np.array([Xi for Xi in X]) # input are already cov matrices!\n ni = np.array(ni)\n if ni.shape[0] == 1:\n ni = ni*np.ones((k,))\n \n N = ni.sum()\n S = np.array([(ni[i]-1)*Si[i] for i in range(len(ni))]).sum(axis=0)/(N - k)\n\n M = (N-k)*np.log(det(S)) - ((ni-1)*np.log(det(Si))).sum()\n if p == 1:\n M = M[0]\n A1 = (2*p**2 + 3*p -1)/(6*(p+1)*(k-1))*((1/(ni-1)) - 1/(N - k)).sum()\n v1 = p*(p+1)*(k-1)/2\n A2 = (p-1)*(p+2)/(6*(k-1))*((1/(ni-1)**2) - 1/(N - k)**2).sum()\n\n if (A2 - A1**2) > 0:\n v2 = (v1 + 2)/(A2 - A1**2)\n b = v1/(1 - A1 -(v1/v2))\n Fv1v2 = M/b\n statistic = Fv1v2\n pval = 1.0 - sp.stats.f.cdf(Fv1v2,v1,v2)\n dfs = [v1,v2]\n \n if verbosity == 1:\n print('M = {:1.4f}, F = {:1.4f}, df1 = {:1.1f}, df2 = {:1.1f}, p = {:1.4f}'.format(M,Fv1v2,v1,v2,pval))\n else:\n v2 = (v1 + 2)/(A1**2 - A2)\n b = v2/(1 - A1 + (2/v2))\n Fv1v2 = v2*M/(v1*(b - M))\n statistic = Fv1v2\n pval = 1.0 - sp.stats.f.cdf(Fv1v2,v1,v2)\n dfs = [v1,v2]\n\n if (ni>20).all() & (p<6) & (k<6): #use Chi2v1\n chi2v1 = M*(1-A1)\n statistic = chi2v1\n pval = 1.0 - sp.stats.chi2.cdf(chi2v1,v1)\n dfs = [v1]\n if verbosity == 1:\n print('M = {:1.4f}, chi2 = {:1.4f}, df1 = {:1.1f}, p = {:1.4f}'.format(M,chi2v1,v1,pval))\n\n else:\n if verbosity == 1:\n print('M = {:1.4f}, F = {:1.4f}, df1 = {:1.1f}, df2 = {:1.1f}, p = {:1.4f}'.format(M,Fv1v2,v1,v2,pval))\n\n return statistic, pval, dfs\n\ndef pitman_morgan(X,Y, verbosity = 0):\n \"\"\"\n Pitman-Morgan Test for the difference between correlated variances with paired samples.\n \n Args:\n :X,Y: \n | ndarrays with data.\n :verbosity: \n | 0, optional\n | If 1: print results. \n \n Returns:\n :tval:\n | statistic\n :pval:\n | p-value\n :df:\n | degree of freedom.\n :ratio:\n | variance ratio var1/var2 (with var1 > var2).\n\n Note:\n 1. Based on Gardner, R.C. (2001). Psychological Statistics Using SPSS for Windows. New Jersey, Prentice Hall.\n 2. 
Python port from matlab code by Janne Kauttonen (https://nl.mathworks.com/matlabcentral/fileexchange/67910-pitmanmorgantest-x-y; accessed Sep 26, 2019)\n \"\"\"\n N = X.shape[0]\n var1, var2 = X.var(axis=0),Y.var(axis=0)\n cor = np.corrcoef(X,Y)[0,1]\n \n # must have var1 > var2:\n if var1 < var2:\n var1, var2 = var2, var1\n\n ratio = var1/var2\n \n # formulas from Garder (2001, p.57):\n numerator1_S1minusS2 = var1-var2\n numerator2_SQRTnminus2 = np.sqrt(N-2)\n numerator3 = numerator1_S1minusS2*numerator2_SQRTnminus2\n denominator1_4timesS1timesS2 = 4*var1*var2\n denominator2_rSquared = cor**2\n denominator3_1minusrSquared = 1.0 - denominator2_rSquared\n denominator4_4timesS1timesS2div1minusrSquared = denominator1_4timesS1timesS2*denominator3_1minusrSquared\n denominator5 = np.sqrt(denominator4_4timesS1timesS2div1minusrSquared)\n df = N-2\n if denominator5 == 0:\n denominator5 = _EPS\n tval = numerator3/denominator5\n \n # compute stats:\n p = 2*(1.0-sp.stats.t.cdf(tval,df))\n if verbosity == 1:\n print('tval = {:1.4f}, df = {:1.1f}, p = {:1.4f}'.format(tval,df, p))\n\n return tval, p, df, ratio\n\ndef stress(DE,DV, axis = 0, max_scale = 100):\n \"\"\"\n Calculate STandardize-Residual-Sum-of-Squares (STRESS).\n \n Args:\n :DE, DV: \n | ndarrays of data to be compared.\n :axis:\n | 0, optional\n | axis with samples\n :max_scale:\n | 100, optional\n | Maximum of scale.\n \n Returns:\n :stress:\n | nadarray with stress value(s).\n \n Reference:\n 1. `Melgosa, M., García, P. A., Gómez-Robledo, L., Shamey, R., Hinks, D., Cui, G., & Luo, M. R. (2011). \n Notes on the application of the standardized residual sum of squares index \n for the assessment of intra- and inter-observer variability in color-difference experiments. \n Journal of the Optical Society of America A, 28(5), 949–953. 
\n `_\n \"\"\"\n F = (DE**2).sum(axis = axis, keepdims = True)/(DE*DV).sum(axis = axis, keepdims = True)\n return max_scale*(((DE - F*DV)**2).sum(axis = axis, keepdims = True)/(F**2*DV**2).sum(axis = axis, keepdims = True))**0.5\n\ndef stress_F_test(stressA, stressB, N, alpha = 0.05):\n \"\"\" \n Perform F-test on significance of difference between STRESS A and STRESS B.\n \n Args:\n :stressA, stressB:\n | ndarray with stress(es) values for A and B\n :N:\n | int or ndarray with number of samples used to determine stress values.\n :alpha:\n | 0.05, optional\n | significance level\n \n Returns:\n :Fstats:\n | Dictionary with keys:\n | - 'p': p-values\n | - 'F': F-values\n | - 'Fc': critcal values\n | - 'H': string reporting on significance of A compared to B.\n \"\"\"\n N = N*np.ones(stressA.shape[0])\n Fvs = np.nan*np.ones_like(stressA)\n ps = Fvs.copy()\n Fcs = Fvs.copy()\n H = []\n i = 0\n for stA, stB in zip(stressA,stressB):\n Ni = N[i]\n Fvs[i] = stA**2/stB**2\n ps[i] = stats.f.sf(Fvs[i], Ni-1, Ni-1)\n Fcs[i] = stats.f.ppf(q = alpha/2, dfn = Ni - 1, dfd = Ni-1)\n if Fvs[i] < Fcs[i]:\n H_ = \"A significantly better than B\"\n elif Fvs[i] > 1/Fcs[i]:\n H_ = \"A significantly poorer than B\"\n elif (Fcs[i] <= Fvs[i]) & (Fvs[i] < 1):\n H_ = \"A insignificantly better than B\"\n elif (1 < Fvs[i]) & (Fvs[i] <= 1/Fcs[i]):\n H_ = \"A insignificanty poorer than B\"\n elif (Fvs[i] == 1):\n H_ = \"A equals B\"\n H.append(H_)\n i+=1\n Fstats = {'p': ps, 'F': Fvs, 'Fc': Fcs, 'H': H}\n return Fstats\n\ndef mean_distance_weighted(x, axis = 0, keepdims = False, center_x = False, rtol = 1e-3, max_iter = 100, cnt = 0,mu = None, mu0 = 0):\n \"\"\"\n Recursively calculate distance weighted mean.\n \n Args: \n :x:\n | ndarray with data\n :axis:\n | dimension along which to take mean\n :keepdims:\n | False, optional\n | If True: keep dimension of original ndarray\n :center_x:\n | True, optional\n | Center data first.\n :rtol:\n | 1e-3, optional\n | Relative tolerance on recursive mean values. If two sequential\n | mean values differ less than this amount, the recursion stops.\n :max_iter:\n | 100, optional\n | Maximum amount of recursions. If this number is reached the \n | recursion stops, even when rtol is not yet achieved. 
(to avoid\n | getting stuck in an infinite loop when the recursion doesn't converge)\n :cnt,mu,mu0:\n | Needed for passing values across recursions to be able to stop them.\n | DO NOT CHANGE.\n \n Returns:\n :mu_dw:\n | distance weighted mean of the array\n \n \"\"\"\n cnt += 1 \n if mu is None: \n if center_x: \n mu0 = np.mean(x, axis = axis, keepdims = True)\n x = x - mu0\n mu = np.mean(x, axis = axis, keepdims = True)\n \n w = (((x - mu)**2).sum(axis=-1,keepdims=True)**0.5)\n # w = (((x - mu)**2))**0.5\n w[w==0] = 1e-100\n w = 1 / w\n w = w / np.sum(w, axis = axis, keepdims = True)\n mu_prev = mu\n mu = np.sum(w*x,axis = axis, keepdims = True)\n if ((np.abs((mu-mu_prev)/(mu + 1e-100)) > rtol).any()) & (cnt < max_iter):\n mu = mean_distance_weighted(x, axis = axis, keepdims = keepdims,mu = mu, mu0 = mu0, max_iter = max_iter, cnt = cnt) \n return mu\n else:\n if keepdims:\n return mu0 + mu\n else:\n return np.squeeze(mu0 + mu, axis = axis)\n","repo_name":"ksmet1977/luxpy","sub_path":"luxpy/math/basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":47953,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"4"} +{"seq_id":"19482452277","text":"from struct import Struct\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import from_json, to_json, col, unbase64, base64, split, expr\nfrom pyspark.sql.types import StructField, StructType, FloatType,StringType, BooleanType, ArrayType, DateType\n\nredisServerSchema = StructType([\n StructField(\"key\",StringType()),\n StructField(\"existType\",StringType()),\n StructField(\"Ch\",BooleanType()),\n StructField(\"Incr\",BooleanType()),\n StructField(\"zSetEntries\",ArrayType(\n StructType([\n StructField(\"element\",StringType()),\n StructField(\"Score\",StringType())\n ])\n ))\n])\n\ncustomerJSONSchema = StructType([\n StructField(\"customerName\", StringType()),\n StructField(\"email\",StringType()),\n StructField(\"phone\",StringType()),\n StructField(\"birthYear\", StringType())\n # StructField(\"birthDay\",DateType())\n])\n\nstediEventsSchema = StructType([\n StructField(\"customer\",StringType()),\n StructField(\"score\",FloatType()),\n # StructField(\"score\",StringType()),\n StructField(\"riskDate\",DateType())\n])\n\n\nspark = SparkSession.builder.appName(\"RedisKafkaStream\").getOrCreate()\nspark.sparkContext.setLogLevel('WARN')\n\nredisServerRawStreamingDF = spark \\\n .readStream \\\n .format(\"kafka\") \\\n .option(\"kafka.bootstrap.servers\", \"localhost:9092\") \\\n .option(\"subscribe\", \"redis-server\") \\\n .option(\"startingOffsets\",\"earliest\") \\\n .load()\n\nredisServerStreamingDF = redisServerRawStreamingDF \\\n .selectExpr(\"CAST(value AS STRING)\")\n\nredisServerStreamingDF.withColumn(\"value\", from_json(\"value\",redisServerSchema)) \\\n .select(col('value.*')) \\\n .createOrReplaceTempView(\"RedisSortedSet\")\n\nspark.sql(\"SELECT key, zSetEntries[0].element as encodedCustomer \\\n FROM RedisSortedSet\")\n\ncustomerJSONDF = spark.sql(\"SELECT CAST(unbase64(encodedCustomer) AS STRING \\\n as customerJSON from RedisSortedSet)\")\n\ncustomerJSONDF.withColumn(\"customer\", from_json(\"customerJSON\",customerJSONSchema)) \\\n .select(col('customer.*')) \\\n .createOrReplaceTempView(\"CustomerRecords\")\n\n\nemailAndBirthDayStreamingDF = spark.sql(\"SELECT email, brithDay FROM CustomerRecords \\\n WHERE email IS NOT NULL AND birthDay IS NOT NULL\")\n\n\nemailAndBirthYearStreamingDF = emailAndBirthDayStreamingDF \\\n .withColumn(\"birthYear\", 
split(emailAndBirthDayStreamingDF['birthDay'],'-')[0])\n\n\nemailAndBirthYearStreamingDF = emailAndBirthDayStreamingDF.select('email','birthYear')\n\nstediEventsRawStreamingDf = spark \\\n .readStream \\\n .format(\"kafka\") \\\n .option(\"kafka.bootstrap.servers\",\"localhost:9092\") \\\n .option(\"subscribe\", \"stedi-events\") \\\n .option(\"startingOffsets\", \"earliest\") \\\n .load()\n\n\nstediEventsStreamingDF = stediEventsRawStreamingDf.selectExpr(\"CAST(value AS STRING)\")\n\n\nstediEventsStreamingDF.withColumn(\"value\", from_json(\"value\", stediEventsSchema)) \\\n .select(col('value.*')) \\\n .createOrReplaceTempView(\"CustomerRisk\")\n\ncustomerRiskStreamingDF = spark.sql(\"SELECT customer, score FROM CustomerRisk\")\n\njoinedDF = customerRiskStreamingDF.join(emailAndBirthYearStreamingDF, \\\n customerRiskStreamingDF.customer == emailAndBirthYearStreamingDF.email)\n\nquery = joinedDF \\\n .selectExpr(\"to_json(struct(*)) AS value\") \\\n .writeStream \\\n .format(\"kafka\") \\\n .option(\"kafka.bootstrap.servers\", \"localhost:9092\") \\\n .option(\"topic\",\"stedi-graph\") \\\n .start() \\\n .awaitTermination()\n\n","repo_name":"kcw2297/EvaluateHumanBalance","sub_path":"project/starter/sparkpykafkajoin.py","file_name":"sparkpykafkajoin.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"38781343488","text":"import time\nrolls = {3: 1, 4: 3, 5: 6, 6: 7, 7: 6, 8: 3, 9: 1}\ncache = {}\n\n\ndef universes(player_a, player_b, turn):\n global cache\n if player_a[1] >= 21:\n return (1, 0)\n if player_b[1] >= 21:\n return (0, 1)\n\n if str((player_a, player_b, turn)) in cache:\n return cache[str((player_a, player_b, turn))]\n value_a = 0\n value_b = 0\n if turn == 0:\n for i in range(3, 10):\n position = (player_a[0] + i - 1) % 10 + 1\n player = [position, player_a[1] + position]\n (a_win, b_win) = universes(player, player_b, 1)\n value_a = value_a + rolls[i] * a_win\n value_b = value_b + rolls[i] * b_win\n else:\n for i in range(3, 10):\n position = (player_b[0] + i - 1) % 10 + 1\n player = [position, player_b[1] + position]\n (a_win, b_win) = universes(player_a, player, 0)\n value_a = value_a + rolls[i] * a_win\n value_b = value_b + rolls[i] * b_win\n cache[str((player_a, player_b, turn))] = (value_a, value_b)\n return (value_a, value_b)\n\n\ndef puzzle(data):\n total = 0\n players = []\n for line in data:\n line = line.replace('\\n', '')\n players.append(int(line.split(': ')[1]))\n\n (a_win, b_win) = universes([players[0], 0], [players[1], 0], 0)\n total = max(a_win, b_win)\n print(\"Answer: \" + str(total))\n\n\ndata = open(__file__.replace('.py', 'input'))\nstart = time.perf_counter()\npuzzle(data.readlines())\nend = time.perf_counter()\nprint(\"Time: \", end=\"\")\ntime_taken = end - start\nif time_taken * 1000 < 1:\n print(time_taken * 1000000, end=\"\")\n print(\"ns\")\nelse:\n print(time_taken * 1000, end=\"\")\n print(\"ms\")\n","repo_name":"Thespyinthehole/AdventOfCode2021","sub_path":"Day21/puzzle2.py","file_name":"puzzle2.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"3176734828","text":"def matrix_subdivide(matrix, subsize):\n new_matrix = []\n for i in range(size - subsize + 1):\n for j in range(size - subsize + 1):\n sub_matrix = []\n for k in range(subsize):\n sub_matrix.append(matrix[i + k][j:j + subsize])\n\n new_matrix.append(sub_matrix)\n\n return 
new_matrix\n\n\nsize = 6\nsub_size = 5\nmatrix = [\n ['1', 'R', 'T', 'Y', '1', '1'],\n ['1', 'W', '0', '2', '1', '1'],\n ['1', '3', '1', '4', '1', '1'],\n ['1', 'X', '6', 'Q', '1', '1'],\n ['1', 'A', 'B', 'C', '1', '1'],\n ['1', '1', '1', '1', '1', '1'],\n]\n\nprint(*matrix, sep='\\n')\nprint()\n\nfor z in matrix_subdivide(matrix, sub_size):\n print(*z, sep='\\n')\n print()","repo_name":"simonen/PythonAdvanced","sub_path":"99. Personal/01. Matrix subdivision.py","file_name":"01. Matrix subdivision.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"32165705441","text":"\"\"\"\nThe workflow builder factory method.\n\nAll the checks and the construction of the workflow are done\ninside this function that has pickleable inputs and output\ndictionary (``retval``) to allow isolation using a\n``multiprocessing.Process`` that allows aslprep to enforce\na hard-limited memory-scope.\n\n\"\"\"\n\n\ndef build_workflow(config_file, retval):\n \"\"\"Create the Nipype Workflow that supports the whole execution graph.\"\"\"\n from niworkflows.reports.core import generate_reports\n from niworkflows.utils.bids import check_pipeline_version, collect_participants\n from niworkflows.utils.misc import check_valid_fs_license\n\n from aslprep import config\n from aslprep.utils.misc import check_deps\n from aslprep.workflows.base import init_aslprep_wf\n\n config.load(config_file)\n build_log = config.loggers.workflow\n\n output_dir = config.execution.output_dir\n version = config.environment.version\n\n retval[\"return_code\"] = 1\n retval[\"workflow\"] = None\n\n # warn if older results exist: check for dataset_description.json in output folder\n msg = check_pipeline_version(version, output_dir / \"aslprep\" / \"dataset_description.json\")\n if msg is not None:\n build_log.warning(msg)\n\n # Please note this is the input folder's dataset_description.json\n dset_desc_path = config.execution.bids_dir / \"dataset_description.json\"\n if dset_desc_path.exists():\n from hashlib import sha256\n\n desc_content = dset_desc_path.read_bytes()\n config.execution.bids_description_hash = sha256(desc_content).hexdigest()\n\n # First check that bids_dir looks like a BIDS folder\n subject_list = collect_participants(\n config.execution.bids_dir, participant_label=config.execution.participant_label\n )\n\n # Called with reports only\n if config.execution.reports_only:\n from pkg_resources import resource_filename as pkgrf\n\n build_log.log(25, \"Running --reports-only on participants %s\", \", \".join(subject_list))\n retval[\"return_code\"] = generate_reports(\n subject_list,\n config.execution.output_dir,\n config.execution.run_uuid,\n config=pkgrf(\"aslprep\", \"data/reports-spec.yml\"),\n packagename=\"aslprep\",\n )\n return retval\n\n # Build main workflow\n init_msg = f\"\"\"\n Running ASLPREP version {config.environment.version}:\n * BIDS dataset path: {config.execution.bids_dir}.\n * Participant list: {subject_list}.\n * Run identifier: {config.execution.run_uuid}.\n * Output spaces: {config.execution.output_spaces}.\"\"\"\n\n if config.execution.anat_derivatives:\n init_msg += f\"\"\"\n * Anatomical derivatives: {config.execution.anat_derivatives}.\"\"\"\n build_log.log(25, init_msg)\n\n retval[\"workflow\"] = init_aslprep_wf()\n\n # Check for FS license after building the workflow\n if not check_valid_fs_license():\n build_log.critical(\n \"\"\"\\\nERROR: a valid license file is required for FreeSurfer to run. 
ASLPrep looked for an existing \\\nlicense file at several paths, in this order: 1) command line argument ``--fs-license-file``; \\\n2) ``$FS_LICENSE`` environment variable; and 3) the ``$FREESURFER_HOME/license.txt`` path. Get it \\\n(for free) by registering at https://surfer.nmr.mgh.harvard.edu/registration.html\"\"\"\n )\n retval[\"return_code\"] = 126 # 126 == Command invoked cannot execute.\n return retval\n\n # Check workflow for missing commands\n missing = check_deps(retval[\"workflow\"])\n if missing:\n build_log.critical(\n \"Cannot run ASLPrep. Missing dependencies:%s\",\n \"\\n\\t* \".join([\"\"] + [f\"{cmd} (Interface: {iface})\" for iface, cmd in missing]),\n )\n retval[\"return_code\"] = 127 # 127 == command not found.\n return retval\n\n config.to_filename(config_file)\n build_log.info(\n \"ASLPrep workflow graph with %d nodes built successfully.\",\n len(retval[\"workflow\"]._get_all_nodes()),\n )\n retval[\"return_code\"] = 0\n return retval\n\n\ndef build_boilerplate(config_file, workflow):\n \"\"\"Write boilerplate in an isolated process.\"\"\"\n from aslprep import config\n\n config.load(config_file)\n logs_path = config.execution.output_dir / \"aslprep\" / \"logs\"\n boilerplate = workflow.visit_desc()\n citation_files = {ext: logs_path / f\"CITATION.{ext}\" for ext in (\"bib\", \"tex\", \"md\", \"html\")}\n\n if boilerplate:\n # To please git-annex users and also to guarantee consistency\n # among different renderings of the same file, first remove any\n # existing one\n for citation_file in citation_files.values():\n try:\n citation_file.unlink()\n except FileNotFoundError:\n pass\n\n citation_files[\"md\"].write_text(boilerplate)\n\n if not config.execution.md_only_boilerplate and citation_files[\"md\"].exists():\n from shutil import copyfile\n from subprocess import CalledProcessError, TimeoutExpired, check_call\n\n from pkg_resources import resource_filename as pkgrf\n\n # Generate HTML file resolving citations\n cmd = [\n \"pandoc\",\n \"-s\",\n \"--bibliography\",\n pkgrf(\"aslprep\", \"data/boilerplate.bib\"),\n \"--filter\",\n \"pandoc-citeproc\",\n \"--metadata\",\n 'pagetitle=\"ASLPrep citation boilerplate\"',\n str(citation_files[\"md\"]),\n \"-o\",\n str(citation_files[\"html\"]),\n ]\n\n config.loggers.cli.info(\"Generating an HTML version of the citation boilerplate...\")\n try:\n check_call(cmd, timeout=10)\n except (FileNotFoundError, CalledProcessError, TimeoutExpired):\n config.loggers.cli.warning(\"Could not generate CITATION.html file:\\n%s\", \" \".join(cmd))\n\n # Generate LaTex file resolving citations\n cmd = [\n \"pandoc\",\n \"-s\",\n \"--bibliography\",\n pkgrf(\"aslprep\", \"data/boilerplate.bib\"),\n \"--natbib\",\n str(citation_files[\"md\"]),\n \"-o\",\n str(citation_files[\"tex\"]),\n ]\n config.loggers.cli.info(\"Generating a LaTeX version of the citation boilerplate...\")\n try:\n check_call(cmd, timeout=10)\n except (FileNotFoundError, CalledProcessError, TimeoutExpired):\n config.loggers.cli.warning(\"Could not generate CITATION.tex file:\\n%s\", \" \".join(cmd))\n else:\n copyfile(pkgrf(\"aslprep\", \"data/boilerplate.bib\"), citation_files[\"bib\"])\n","repo_name":"PennLINC/aslprep","sub_path":"aslprep/cli/workflow.py","file_name":"workflow.py","file_ext":"py","file_size_in_byte":6563,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"4"} +{"seq_id":"1269156361","text":"from collections import deque\n \nn,k=map(int, input().split())\nans=[]\ndeq=deque()\nfor i in range(1,n+1):\n 
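# fill the deque with players 1..n (the Josephus circle)\n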
deq.append(i)\n\nwhile deq:\n    deq.rotate(-k)\n    ans.append(deq.pop())\nprint('<',', '.join(str(i) for i in ans),'>',sep='')","repo_name":"ohjiae/Algorithm_Study_2022","sub_path":"simulation/div-leejaemyeong/BOJ_11866_요세푸스_문제_0.py","file_name":"BOJ_11866_요세푸스_문제_0.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"4"} +{"seq_id":"51156574","text":"# count the vowels in the input string\nn=input()\ncount=0\ng=['a','e','i','o','u','A','E','I','O','U']\nfor i in range(len(n)):\n    if n[i] in g:\n        count+=1\nif len(n)==0:\n    print(0)\nelse:\n    print(count)\n","repo_name":"veeru371/codemind-python","sub_path":"vowel_count.py","file_name":"vowel_count.py","file_ext":"py","file_size_in_byte":179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"1931579709","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# ----------------------------------------------------------------------\n# Author: sebastian.piec@\n# Modified: 2019, March 11\n# ----------------------------------------------------------------------\n\nseed = 999\nagent = \"output/jetdqn_v1.pt\"\ntraining_log = \"logs/training.log\"\n\n# dqn\nn_episodes = 100\n\n# evolutionary agents\npool_size = 120\nn_generations = 40\n\n# ----------------------------------------------------------------------\ndef read_args():\n    global seed, agent, n_episodes\n\n    import argparse\n    parser = argparse.ArgumentParser(description='')\n    parser.add_argument(\"-s\", \"--seed\", type=int, default=seed,\n                        help=\"random seed\")\n    parser.add_argument(\"-e\", \"--episodes\", type=int, default=n_episodes,\n                        help=\"# of episodes\")\n    parser.add_argument(\"-a\", \"--agent\", type=str, default=agent,\n                        help=\"agent file\")\n    args = parser.parse_args()\n\n    n_episodes = args.episodes\n    seed = args.seed\n    agent = args.agent\n","repo_name":"spiec/reinforcement","sub_path":"experiments/jet/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"36992318705","text":"# Problem\n# Three operations can be applied to an integer X:\n#\n# If X is divisible by 3, divide it by 3.\n# If X is divisible by 2, divide it by 2.\n# Subtract 1.\n# Given an integer N, we want to turn it into 1 using the three operations above. Print the minimum number of operations needed.\n#\n# Input\n# The first line contains an integer N with 1 <= N <= 10^6.\n#\n# Output\n# Print the minimum number of operations on the first line.\n\ndef cal(x): # greedy single-step helper (not used by the DP solution below)\n    if x%3==0:\n        return x//3\n    elif x%2==0:\n        return x//2\n    else:\n        return x-1\n\ndef calAll(n): # fill the global DP table x[2..n] bottom-up\n    x[1] = 0\n    for i in range(2, n+1):\n        prev = i-1\n        if i%3==0 and x[i//3] < x[prev]:\n            prev = i//3\n        if i%2==0 and x[i//2] < x[prev]:\n            prev = i//2\n        x[i] = x[prev]+1\n\nn = int(input())\nx = [0]*(n+1) # x[i] = minimum number of operations to turn i into 1\ncalAll(n)\nprint(x[n])","repo_name":"Yoo-an/Algorithm","sub_path":"Dynamic Programming/1463 - 1로 만들기.py","file_name":"1463 - 1로 만들기.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"71951325236","text":"import json\nimport logging\nimport random\nfrom pathlib import Path\n\nimport discord\nfrom discord.ext import commands\nfrom rapidfuzz import fuzz\n\nfrom bot import constants\nfrom bot.bot import Bot\n\nlog = logging.getLogger(__name__)\n\nPRIDE_RESOURCE = json.loads(Path(\"bot/resources/holidays/pride/prideleader.json\").read_text(\"utf8\"))\nMINIMUM_FUZZ_RATIO = 40\n\n\nclass PrideLeader(commands.Cog):\n    \"\"\"Gives information about Pride Leaders.\"\"\"\n\n    def __init__(self, bot: Bot):\n        self.bot = bot\n\n    def invalid_embed_generate(self, pride_leader: str) -> discord.Embed:\n        \"\"\"\n        Generate an embed for an invalid pride leader name.\n\n        The invalid embed contains a list of closely matched names of the invalid pride\n        leader the user gave. If no closely matched names are found, it lists all\n        the available pride leader names.\n\n        Wikipedia is a useful place to learn about pride leaders and we don't have all\n        the pride leaders, so the bot adds a field containing the wikipedia\n        command to execute.\n        \"\"\"\n        embed = discord.Embed(\n            color=constants.Colours.soft_red\n        )\n        valid_names = []\n        pride_leader = pride_leader.title()\n        for name in PRIDE_RESOURCE:\n            if fuzz.ratio(pride_leader, name) >= MINIMUM_FUZZ_RATIO:\n                valid_names.append(name)\n\n        if not valid_names:\n            valid_names = \", \".join(PRIDE_RESOURCE)\n            error_msg = \"Sorry your input didn't match any stored names, here is a list of available names:\"\n        else:\n            valid_names = \"\\n\".join(valid_names)\n            error_msg = \"Did you mean?\"\n\n        embed.description = f\"{error_msg}\\n```\\n{valid_names}\\n```\"\n        embed.set_footer(text=\"To add more pride leaders, feel free to open a pull request!\")\n\n        return embed\n\n    def embed_builder(self, pride_leader: dict) -> discord.Embed:\n        \"\"\"Generate an Embed with information about a pride leader.\"\"\"\n        name = next(name for name, info in PRIDE_RESOURCE.items() if info == pride_leader)\n\n        embed = discord.Embed(\n            title=name,\n            description=pride_leader[\"About\"],\n            color=constants.Colours.blue\n        )\n        embed.add_field(\n            name=\"Known for\",\n            value=pride_leader[\"Known for\"],\n            inline=False\n        )\n        embed.add_field(\n            name=\"D.O.B and Birth place\",\n            value=pride_leader[\"Born\"],\n            inline=False\n        )\n        embed.add_field(\n            name=\"Awards and honors\",\n            value=pride_leader[\"Awards\"],\n            inline=False\n        )\n        embed.add_field(\n            name=\"For More Information\",\n            value=f\"Do `{constants.Client.prefix}wiki {name}`\"\n                  f\" in <#{constants.Channels.sir_lancebot_playground}>\",\n            inline=False\n        )\n        embed.set_thumbnail(url=pride_leader[\"url\"])\n        return embed\n\n    @commands.command(aliases=(\"pl\", \"prideleader\"))\n    async def pride_leader(self, ctx: commands.Context, *, pride_leader_name: str | None) -> None:\n        \"\"\"\n        Information about a Pride Leader.\n\n
Returns information about the specified pride leader\n and if there is no pride leader given, return a random pride leader.\n \"\"\"\n if not pride_leader_name:\n leader = random.choice(list(PRIDE_RESOURCE.values()))\n else:\n leader = PRIDE_RESOURCE.get(pride_leader_name.title())\n if not leader:\n log.trace(f\"Got a Invalid pride leader: {pride_leader_name}\")\n\n embed = self.invalid_embed_generate(pride_leader_name)\n await ctx.send(embed=embed)\n return\n\n embed = self.embed_builder(leader)\n await ctx.send(embed=embed)\n\n\nasync def setup(bot: Bot) -> None:\n \"\"\"Load the Pride Leader Cog.\"\"\"\n await bot.add_cog(PrideLeader(bot))\n","repo_name":"python-discord/sir-lancebot","sub_path":"bot/exts/holidays/pride/pride_leader.py","file_name":"pride_leader.py","file_ext":"py","file_size_in_byte":3910,"program_lang":"python","lang":"en","doc_type":"code","stars":226,"dataset":"github-code","pt":"4"} +{"seq_id":"37948791177","text":"from pyspark import SparkContext, SparkConf, StorageLevel\nfrom pyspark.sql import SparkSession\nfrom pyspark.ml.evaluation import RegressionEvaluator, MulticlassClassificationEvaluator\nfrom pyspark.sql import Row\nfrom pyspark.mllib.evaluation import RankingMetrics\n\n\ndef createMovieRatingPair(pred):\n return pred.user, (pred.movie, pred.rating)\n\n\ndef createMoviePredictionPair(pred):\n return pred.user, (pred.movie, pred.prediction)\n\n\ndef sortTuples(movies):\n uid = movies[0]\n mTuples = movies[1]\n sortedMovies = sorted(mTuples, key=lambda x: x[1])\n rankedMovies = []\n for tup in sortedMovies:\n if (tup[1] >= 3.0):\n rankedMovies.append(tup[0])\n return uid, rankedMovies\n\n\ndef mapRatingRow(line):\n charDelim = '\\t'\n user, movie, rating, timestamp = line.split(charDelim, 3)\n return Row(user=int(user), movie=int(movie), rating=float(rating))\n\n\ndef calculateMetrics(predictionDF):\n metricOutput = []\n # Calculate regression metrics\n evaluator = RegressionEvaluator(metricName=\"rmse\", labelCol=\"rating\", predictionCol=\"prediction\")\n rmse = evaluator.evaluate(predictionDF)\n metricOutput.append({\"metric\": \"RMSE\", \"value\": str(rmse)})\n evaluator = RegressionEvaluator(metricName=\"mae\", labelCol=\"rating\", predictionCol=\"prediction\")\n mse = evaluator.evaluate(predictionDF)\n metricOutput.append({\"metric\": \"MAE\", \"value\": str(mse)})\n\n # Calculate accuracy metrics\n evaluator = MulticlassClassificationEvaluator(predictionCol=\"prediction\", labelCol=\"rating\")\n acc = evaluator.evaluate(predictionDF, {evaluator.metricName: \"accuracy\"})\n metricOutput.append({\"metric\": \"Accuracy\", \"value\": str(acc)})\n\n # Calculate ranking metrics\n testRDD = predictionDF.rdd.map(createMovieRatingPair) # user, (movie, rating)\n testRDD = testRDD.groupByKey() # user, [(movie1, rating1), (movie2, rating2), ..., (movieN, ratingN)]\n testRDD = testRDD.map(sortTuples) # user, [movie1, movie2, ... , movieN)\n\n predictionsRDD = predictionDF.rdd.map(createMoviePredictionPair) # user, (movie, prediction)\n predictionsRDD = predictionsRDD.groupByKey() # user [(movie1, prediction1), (movie2, prediction2)...]\n predictionsRDD = predictionsRDD.map(sortTuples) # user, [movie1, movie2, ... 
, movieN]\n predictionsAndRatings = predictionsRDD.join(testRDD).values()\n\n metrics = RankingMetrics(predictionsAndRatings)\n map = metrics.meanAveragePrecision\n metricOutput.append({\"metric\": \"MAP\", \"value\": str(map)})\n\n # Output metrics\n metricRDD = sc.parallelize(metricOutput)\n metricDF = metricRDD.toDF()\n metricDF.coalesce(1).write.format('json').save(\"rec-metrics.json\")\n\n\ndef train_model(data):\n # fit model\n rec = MixMemRecommender(itemCol='movie')\n recModel = rec.fit(data)\n return recModel\n\n\ndef predict_with_model(recModel, data):\n\n predictions = recModel.transform(test_ratings)\n # calculate metrics\n calculateMetrics(predictions)\n\n\nif __name__ == \"__main__\":\n conf = SparkConf()\n conf.set('spark.executor.memory', '8g')\n conf.set('spark.app.name', 'mix-mem-rec')\n conf.set('spark.serializer', 'org.apache.spark.serializer.KryoSerializer')\n\n sc = SparkContext(conf=conf)\n spark = SparkSession \\\n .builder \\\n .getOrCreate()\n\n sc.addFile(\"mixmemrec.py\")\n from mixmemrec import MixMemRecommender, MixMemModel\n\n # load data file\n ratingsRDD = sc.textFile(\"movie.100k.data\")\n rddTrain, rddTest = ratingsRDD.randomSplit([.7, .3], 17)\n rddTrain = rddTrain.map(mapRatingRow)\n rddTest = rddTest.map(mapRatingRow)\n ratings = spark.createDataFrame(rddTrain)\n test_ratings = spark.createDataFrame(rddTest)\n\n # train model\n model = train_model(ratings)\n\n # save model\n model.save(\"mixmemrec\")\n\n # load model\n model = MixMemModel(itemCol='movie')\n model.load(spark, \"mixmemrec\")\n\n # make predictions\n predict_with_model(model, test_ratings)\n\n\n\n\n","repo_name":"billjeffries/mixMemRec","sub_path":"src/movie_example.py","file_name":"movie_example.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"8950058713","text":"import yfinance as yf\nimport pandas as pd\nimport datetime\nfrom datetime import date\nimport financedatabase as fd\nimport streamlit as st\nimport numpy as np\nimport statistics\n\nst.title('Investments')\nst.markdown('Description')\nst.image('https://i.dailymail.co.uk/i/pix/2017/09/12/15/442F442E00000578-0-image-a-13_1505228265730.jpg')\n\n\ncol1, col2 = st.columns(2)\ninvested_amount = col1.number_input('Insert initial amount', 1, 1000000)\nyears_maintained = col2.slider('Years of maintenance', 1, 25)\n\n\nif invested_amount < 5000:\n list_etf = ['^GSPC']\nelif invested_amount < 10000:\n list_etf = ['^GSPC', 'GC=F']\nelif invested_amount < 25000:\n list_etf = ['^GSPC', 'GC=F', '^IXIC', '^TNX']\nelif invested_amount < 50000:\n list_etf = ['^GSPC', 'GC=F', '^IXIC', '^TNX', '^FTSE', '^N225', '^GDAXI', '^FCHI','^STOXX50E'] # global indexes\nelse:\n list_etf = ['^GSPC', 'GC=F', '^IXIC', '^TNX', '^FTSE', '^N225', '^GDAXI', '^FCHI','^STOXX50E', 'EMQQ', 'FEM'] # emergents\n \n\n\n# dates of historical data of actives\nstart = \"2012-06-30\"\ntoday = date.today().strftime(\"%Y-%m-%d\")\n\n# download the historical data of each active\nlist_etf_data = []\nfor i in range(len(list_etf)):\n print(i)\n etf = yf.download(list_etf[i], start, today)\n list_etf_data.append(etf)\n\ndict_invested_amount_by_ticker = {}\ndict_money_to_ticker = {}\ncount = 0\n\n# add proportion of money to each ticker of list_etf in dict, its are units of active purchased\nif invested_amount < 5000: # only sp500\n investment1 = invested_amount/list_etf_data[count]['Adj Close'].iloc[-1]\n dict_invested_amount_by_ticker[list_etf[0]] = investment1\n 
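# record the cash amount allocated to the ticker (single-index case: everything to ^GSPC)\n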
dict_money_to_ticker[list_etf[0]] = invested_amount\n\n \nelif invested_amount < 10000: # Gold and sp\n investment1 = 0.8*invested_amount/list_etf_data[count]['Adj Close'].iloc[-1]\n count = count +1\n dict_invested_amount_by_ticker[list_etf[0]] = investment1\n investment2 = 0.2*invested_amount/list_etf_data[count]['Adj Close'].iloc[-1]\n dict_invested_amount_by_ticker[list_etf[1]] = investment2\n dict_money_to_ticker[list_etf[0]] = invested_amount*0.8\n dict_money_to_ticker[list_etf[1]] = invested_amount*0.2\n\n\nelif invested_amount < 25000: # Gold, nq and sp\n investment1 = 0.4*invested_amount/list_etf_data[count]['Adj Close'].iloc[-1]\n dict_invested_amount_by_ticker[list_etf[0]] = investment1\n count = count +1\n \n \n investment2 = 0.2*invested_amount/list_etf_data[count]['Adj Close'].iloc[-1]\n dict_invested_amount_by_ticker[list_etf[1]] = investment2\n count = count +1\n \n investment3 = 0.2*invested_amount/list_etf_data[count]['Adj Close'].iloc[-1]\n dict_invested_amount_by_ticker[list_etf[2]] = investment3\n count = count +1\n \n investment4 = 0.2*invested_amount/list_etf_data[count]['Adj Close'].iloc[-1]\n dict_invested_amount_by_ticker[list_etf[3]] = investment4\n dict_money_to_ticker[list_etf[0]] = invested_amount*0.4\n dict_money_to_ticker[list_etf[1]] = invested_amount*0.2\n dict_money_to_ticker[list_etf[2]] = invested_amount*0.2\n dict_money_to_ticker[list_etf[3]] = invested_amount*0.2\n \n \nelif invested_amount < 50000: # Gold, nq and sp\n investment1 = 0.2*invested_amount/list_etf_data[count]['Adj Close'].iloc[-1]\n dict_invested_amount_by_ticker[list_etf[0]] = investment1\n count = count +1\n \n investment2 = 0.1*invested_amount/list_etf_data[count]['Adj Close'].iloc[-1]\n dict_invested_amount_by_ticker[list_etf[1]] = investment2\n count = count +1\n \n investment3 = 0.1*invested_amount/list_etf_data[count]['Adj Close'].iloc[-1]\n dict_invested_amount_by_ticker[list_etf[2]] = investment3\n count = count +1\n \n investment4 = 0.1*invested_amount/list_etf_data[count]['Adj Close'].iloc[-1]\n dict_invested_amount_by_ticker[list_etf[3]] = investment4\n count = count +1\n \n investment5 = 0.1*invested_amount/list_etf_data[count]['Adj Close'].iloc[-1]\n dict_invested_amount_by_ticker[list_etf[4]] = investment5\n count = count +1\n \n investment6 = 0.1*invested_amount/list_etf_data[count]['Adj Close'].iloc[-1]\n dict_invested_amount_by_ticker[list_etf[5]] = investment6\n count = count +1\n \n investment7 = 0.1*invested_amount/list_etf_data[count]['Adj Close'].iloc[-1]\n dict_invested_amount_by_ticker[list_etf[6]] = investment7\n count = count +1\n \n investment8 = 0.1*invested_amount/list_etf_data[count]['Adj Close'].iloc[-1]\n dict_invested_amount_by_ticker[list_etf[7]] = investment8\n count = count +1\n \n investment9 = 0.1*invested_amount/list_etf_data[count]['Adj Close'].iloc[-1]\n dict_invested_amount_by_ticker[list_etf[8]] = investment9\n \n dict_money_to_ticker[list_etf[0]] = invested_amount*0.2\n dict_money_to_ticker[list_etf[1]] = invested_amount*0.1\n dict_money_to_ticker[list_etf[2]] = invested_amount*0.1\n dict_money_to_ticker[list_etf[3]] = invested_amount*0.1\n dict_money_to_ticker[list_etf[4]] = invested_amount*0.1\n dict_money_to_ticker[list_etf[5]] = invested_amount*0.1\n dict_money_to_ticker[list_etf[6]] = invested_amount*0.1\n dict_money_to_ticker[list_etf[7]] = invested_amount*0.1\n dict_money_to_ticker[list_etf[8]] = invested_amount*0.1\n\n \nelse: # Gold, nq and sp\n investment1 = 0.2*invested_amount/list_etf_data[count]['Adj Close'].iloc[-1]\n 
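# investmentN holds units purchased: allocation fraction * invested_amount / latest adjusted close\n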
dict_invested_amount_by_ticker[list_etf[0]] = investment1\n    count = count + 1\n    \n    # portfolio weights for the remaining tickers; each buys weight*invested_amount worth of shares at the latest Adj Close\n    weights = [0.1, 0.1, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.06, 0.06]\n    for w in weights:\n        dict_invested_amount_by_ticker[list_etf[count]] = w*invested_amount/list_etf_data[count]['Adj Close'].iloc[-1]\n        count = count + 1\n    \n    # money allocated to each ticker (the first ticker takes 0.2 of the total)\n    for idx, w in enumerate([0.2] + weights):\n        dict_money_to_ticker[list_etf[idx]] = invested_amount*w\n\n# Preprocessing the data: create a function that adds extra features\ndef add_features(df):\n    df = df.reset_index()\n    df['HL_pct'] = (df['High'] - df['Low']) / df['Low']\n    df['pct_change'] = (df['Close'] - df['Open']) / df['Open']\n    df['Date'] = pd.to_datetime(df['Date'])\n    df['year'] = df['Date'].dt.year\n    df['month'] = df['Date'].dt.month\n    df['day'] = df['Date'].dt.day\n    df['day_of_week'] = df['Date'].dt.dayofweek\n    # Mapping day of week to actual day names\n    day_names = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\n    df['day_of_week'] = df['day_of_week'].map(lambda x: day_names[x])\n    return df\n\n# apply the function to each asset; list_etf_data becomes a list of DataFrames with the new features\nfor i in range(len(list_etf_data)):\n    list_etf_data[i] = add_features(list_etf_data[i])\n\n\n# split each asset's data by year: a dict keyed by asset whose values are the per-year DataFrames\ndict_etf_data_years = {}\n\nfor i in range(len(list_etf_data)):\n    years = list_etf_data[i]['year'].unique()\n    list_values = []\n    for j in range(len(years)):\n        x = list_etf_data[i][list_etf_data[i]['year']==years[j]]\n        list_values.append(x)\n        if j == len(years)-1:\n            dict_etf_data_years[list_etf[i]] = list_values\n\n# build a dict with each asset's yearly return (last close of the year / first close of the year)\n\npct_each_year_by_active = {}\nfor i in range(len(dict_etf_data_years)): # dict\n    list_values = []\n    for j in range(len(dict_etf_data_years[list_etf[i]])): # keys\n        x = float(dict_etf_data_years[list_etf[i]][j][-1:]['Close'])/float(dict_etf_data_years[list_etf[i]][j][:1]['Close'])\n        list_values.append(x)\n        if j == len(dict_etf_data_years[list_etf[i]])-1:\n            pct_each_year_by_active[list_etf[i]] = list_values\n\n\n# build a dict with the mean yearly return of each asset\npct_mean_active = {}\n\nfor i in range(len(pct_each_year_by_active)):\n    list_values = []\n    for j in range(len(pct_each_year_by_active[list_etf[i]])):\n        x = pct_each_year_by_active[list_etf[i]][j]\n        list_values.append(x)\n        if j == len(pct_each_year_by_active[list_etf[i]])-1:\n            pct_mean_active[list_etf[i]] = statistics.mean(list_values)\n\n\n# projected price of each asset year by year and at the end of the horizon, NO MONEY!!\n\ndict_final_result_active_value = {}\ndict_futures_values_actives = {}\nfor i in range(len(list_etf)):\n    x = list_etf_data[i]['Adj Close'].iloc[-1]\n    list_values = []\n    for j in range(years_maintained):\n        x = x*pct_mean_active[list_etf[i]]\n        list_values.append(x)\n        dict_final_result_active_value[list_etf[i]] = x\n        if j == years_maintained-1:\n            dict_futures_values_actives[list_etf[i]] = list_values\n\n\nfinal_invested_amount = {}\n\nfor i in range(len(dict_invested_amount_by_ticker)):\n    final_invested_amount[list_etf[i]] = dict_invested_amount_by_ticker[list_etf[i]]*dict_final_result_active_value[list_etf[i]]\n\n\nfinal_full_return = []\nfor i in range(len(list_etf)):\n    final_full_return.append(final_invested_amount[list_etf[i]])\n\nfinal_full_return = sum(final_full_return)\n\ndf_final = pd.DataFrame(dict_futures_values_actives)\n\n\n# build a dict with the mean standard deviation of each asset across all years\n\ndict_etf_data_mean_std = {}\nfor i in range(len(dict_etf_data_years)):\n    list_values = []\n    for j in range(len(dict_etf_data_years[list_etf[i]])): # keys\n        x = dict_etf_data_years[list_etf[i]][j].describe()['pct_change'].loc['std']\n        if np.isnan(x):  # NaN never compares equal to itself, so use np.isnan instead of ==\n            x = dict_etf_data_years[list_etf[i]][j].describe()['pct_change'].iloc[6]\n        x = np.mean(x)\n        list_values.append(x)\n        if j == len(dict_etf_data_years[list_etf[i]])-1:\n            dict_etf_data_mean_std[list_etf[i]] = x\n\n# functions that shift a price up or down by its standard deviation\ndef standard_up(price, st):\n    x = price*st\n    x = price + x\n    return x\n\ndef standard_down(price, st):\n    x = price*st\n    x = price - x\n    return x\n\n# suffixes appended to the asset name to build the new column names\nstring_std = '_std'\nstring_std_u = '_std_u' # % up\nstring_std_d = '_std_d' # down\n\nfor i in range(len(list_etf)):\n    df_final[f'{list_etf[i]}{string_std}'] = dict_etf_data_mean_std[list_etf[i]]\n\n\nfor i in range(len(df_final[f'{list_etf[0]}{string_std}'])):\n    for j in range(len(list_etf)):\n        df_final[f'{list_etf[j]}{string_std}'][i] = df_final[f'{list_etf[j]}{string_std}'][i]+df_final[f'{list_etf[j]}{string_std}'][i]*i\n\n# add columns of standard deviation\nfor i in range(len(list_etf)):\n    df_final[f'{list_etf[i]}{string_std_u}'] = df_final.apply(lambda x: standard_up(x[list_etf[i]],\n                                                                                   x[f'{list_etf[i]}{string_std}']),\n                                                              axis=1)\n    df_final[f'{list_etf[i]}{string_std_d}'] = df_final.apply(lambda x: standard_down(x[list_etf[i]],\n                                                                                     x[f'{list_etf[i]}{string_std}']),\n                                                              axis=1)\n\n\n\ncols = []\n\nfor i in range(len(list_etf)):\n    cols.append(df_final.columns[i])\n    
cols.append(df_final.columns[i+len(list_etf)])\n    cols.append(df_final.columns[(i+len(list_etf))*2])\n    cols.append(df_final.columns[((i+len(list_etf))*2)+1])\n\n\ndf_final = df_final[cols]\n\ndf_final_return_by_active = df_final\n\ncount = 0\nfor i in range(len(list_etf)):\n    df_final_return_by_active[df_final_return_by_active.columns[count]] = df_final_return_by_active[df_final_return_by_active.columns[count]]*dict_invested_amount_by_ticker[list_etf[i]]\n    df_final_return_by_active[df_final_return_by_active.columns[count+2]] = df_final_return_by_active[df_final_return_by_active.columns[count+2]]*dict_invested_amount_by_ticker[list_etf[i]]\n    df_final_return_by_active[df_final_return_by_active.columns[count+3]] = df_final_return_by_active[df_final_return_by_active.columns[count+3]]*dict_invested_amount_by_ticker[list_etf[i]]\n    count = count + 4 # columns of value and std\n\n\nlist_etf_std = []\ncount = 0\n\nfor i in range(len(list_etf)): # value plus the two standard-deviation columns\n    list_etf_std.append(df_final_return_by_active.columns[count+2])\n    list_etf_std.append(df_final_return_by_active.columns[count])\n    list_etf_std.append(df_final_return_by_active.columns[count+3])\n    count = count + 4\n\ndf_final_return_by_active = df_final_return_by_active[list_etf_std]\n\ndf_final_return_by_active_T = df_final_return_by_active.T # for plot and df streamlit\n\nfinal_sum_return_all = {}\nfor i in range(years_maintained):\n    list_values_u = []\n    list_values = []\n    list_values_d = []\n    count = 0\n    for j in range(len(list_etf)):\n        list_values_u.append(df_final_return_by_active_T[i][count])\n        list_values.append(df_final_return_by_active_T[i][count+1])\n        list_values_d.append(df_final_return_by_active_T[i][count+2])\n        count = count + 3\n        if j == len(list_etf)-1:\n            if i+1 < 10:\n                final_sum_return_all[f'0{i+1}_year'] = [sum(list_values_u), sum(list_values), sum(list_values_d)]\n            else:\n                final_sum_return_all[f'{i+1}_year'] = [sum(list_values_u), sum(list_values), sum(list_values_d)]\ndf_final_full_std_by_year = pd.DataFrame(final_sum_return_all)\n\nrow_names = {0 : 'std_up',\n             1 : 'normal',\n             2 : 'std_down'}\n\ndf_final_full_std_by_year = df_final_full_std_by_year.rename(index = row_names)\n\ndf_final_full_std_by_year_for_plot = pd.DataFrame(final_sum_return_all)\ndf_final_full_std_by_year_for_plot = df_final_full_std_by_year_for_plot.rename(index = row_names)\n\n# add percentage columns relative to the invested_amount\ndef percentage_diff(return_, initial):\n    # helper kept for reference; the loop below computes the percentage inline\n    return return_/initial\n\nfor i in range(len(df_final_full_std_by_year.columns)):\n    df_final_full_std_by_year[f'{df_final_full_std_by_year.columns[i]}_%'] = (df_final_full_std_by_year[df_final_full_std_by_year.columns[i]]/invested_amount-1)*100\n\nlist_cols = []\nfor i in range(years_maintained):\n    list_cols.append(df_final_full_std_by_year.columns[i])\n    list_cols.append(df_final_full_std_by_year.columns[i+years_maintained])\n\ndf_final_full_std_by_year = df_final_full_std_by_year[list_cols]\n\ntab1, tab2, tab3, tab4, tab5 = st.tabs(['Result chart', 'Tickers chart', 'Final result', 'Results', 'Tickers name'])\ntab1.line_chart(df_final_full_std_by_year_for_plot.T)\ntab2.line_chart(df_final_return_by_active)\ntab3.table(df_final_full_std_by_year)\ntab4.table(df_final_return_by_active)\ntab5.markdown('Tickers, names')\ntab5.markdown('News')\ntab5.markdown('metrics')\ntab5.markdown('POC')\ntab5.markdown('active or passive')\ntab5.markdown('info 
tickers')\n\nst.header('DISCLAIMER')","repo_name":"capyami/final_project","sub_path":"final_project_asset.py","file_name":"final_project_asset.py","file_ext":"py","file_size_in_byte":16594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"39225726263","text":"import os\nimport zipfile\nimport tkinter as tk\nimport customtkinter as ctk\nfrom tkinter import filedialog\nfrom tkinter import messagebox\n\narquivos_encontrados = []#Lista de arquivos\n\ndef search(): # Busca os arquivos por código\n code = entry_searchFile.get()\n directory = entry_select.get()\n global arquivos_encontrados\n for file in os.listdir(directory):\n if code.lower() in file.lower():\n arquivos_encontrados.append(os.path.join(directory, file))\n return arquivos_encontrados\n\ndef zip(): # compacta os arquivos\n global arquivos_encontrados\n filetypes = [('Arquivos', '*.zip')]\n destiny = filedialog.asksaveasfilename(defaultextension=\".zip\", initialfile=\"Arquivos\", filetypes=filetypes)\n with zipfile.ZipFile(destiny, 'w') as zip_file:\n for file in arquivos_encontrados:\n zip_file.write(file, os.path.basename(file))\n\ndef select(): # Seleciona o direório de busca\n diretorio = filedialog.askdirectory()\n entry_select.delete(0, tk.END)\n entry_select.insert(0, diretorio)\n\ndef execute():\n try:\n path = entry_select.get()\n searchfile = entry_searchFile.get()\n if(path!=\"\" and searchfile!= \"\"):\n search()\n zip()\n else:\n messagebox.showerror(\"ATENÇÃO\", \"Preenca todos os campos\")\n except Exception as e:\n messagebox.showerror(\"ATENÇÃO\", f\"Ocorreu um erro: {e}\")\n\nframe = ctk.CTk()\nframe.geometry(\"240x240\")\nframe.title(\"SearchFile\")\nframe.resizable(False, False)\n\nlabel_searchFile = ctk.CTkLabel(master=frame,text=\"SearchFile\", width=180, height=30)\nlabel_searchFile.configure(justify=\"center\", font=(\"arial\", 18))\nlabel_searchFile.place(x=30, y=5)\n\nbutton_select = ctk.CTkButton(master=frame, text= \"Diretório origem\", width=180, height=30, command=select, fg_color=\"dark grey\", text_color=\"black\", hover_color=\"gray\")\nbutton_select.configure(font=(\"arial\", 14))\nbutton_select.place(x=30, y=50)\n\nentry_select = ctk.CTkEntry(master=frame, width=180, height=30)\nentry_select.configure(justify=\"center\", font=(\"arial\", 14))\nentry_select.place(x=30, y=90)\n\nlabel_code = ctk.CTkLabel(master=frame,text=\"Insira a palavra chave:\", width=180, height=30, text_color=\"white\")\nlabel_code.configure(justify=\"center\", font=(\"arial\", 16))\nlabel_code.place(x=30, y=125)\n\nentry_searchFile = ctk.CTkEntry(master=frame, width=180, height=30)\nentry_searchFile.place(x=30, y=160)\nentry_searchFile.configure(justify=\"center\", font=(\"arial\", 14))\n\nbutton_execute = ctk.CTkButton(master=frame, text=\"Executar\", width=180, height=30, command=execute, fg_color=\"dark grey\", text_color=\"black\", hover_color=\"gray\")\nbutton_execute.configure(font=(\"arial\", 14))\nbutton_execute.place(x=30, y=200)\n\nframe.mainloop()\n","repo_name":"KevynSilveira/SearchFile","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"19261549476","text":"# -*- coding: utf-8 -*-\n\nfrom oauth2client.service_account import ServiceAccountCredentials\nimport gspread\nimport pandas as pd\n\n\nSCOPE = [\n 'https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/drive'\n]\n\nDEFAULT_CREDENTIALS = 
ServiceAccountCredentials.from_json_keyfile_name(\n    'credentials/covidometro-info-78d047b1cb0b.json',\n    SCOPE\n)\n\nSOURCE_GSHEETS_KEY = '1MWQE3s4ef6dxJosyqvsFaV4fDyElxnBUB6gMGvs3rEc'\n\nDEFAULT_HEADER = [\n    'Estado', 'Total de Casos', 'Suspeitos', 'Curados', 'Óbitos', 'Testes',\n    'Novos Casos','Novos Óbitos'\n]\nNUM_COLUMNS = len(DEFAULT_HEADER)\n\n\ndef read_values(gsheet_key, credentials):\n    client = gspread.authorize(credentials)\n\n    sheet = client.open_by_key(gsheet_key)\n    worksheet = sheet.sheet1\n\n    cells = worksheet.range('B4:I31')\n    values = list()\n    cur_index = -1\n    count = 0\n\n    for cell in cells:\n        if count % NUM_COLUMNS == 0:\n            values.append(list())\n            cur_index = cur_index + 1\n\n        value = cell.value\n        values[cur_index].append(value)\n\n        count = count + 1\n\n    return values\n\n\ndef read_values_as_dataframe(gsheet_key, credentials):\n    data = read_values(gsheet_key, credentials)\n    data[0][0] = 'BRASIL'\n\n    return pd.DataFrame(data, columns=DEFAULT_HEADER)\n\n\ndef generate_data_file(gsheet_key, credentials, filename):\n    filepath = '{}.csv'.format(filename)\n\n    dataframe = read_values_as_dataframe(gsheet_key, credentials)\n    dataframe.to_csv(filepath, index=False)\n\n\nif __name__ == '__main__':\n    generate_data_file(SOURCE_GSHEETS_KEY, DEFAULT_CREDENTIALS, 'data')\n","repo_name":"embatbr/covidometro","sub_path":"back/gsheets-data-fetcher.py","file_name":"gsheets-data-fetcher.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"12218618446","text":"import pygame, os, sys\nfrom pygame.locals import *\n\nfrom raspigame import *\nfrom swarm import *\nfrom player import *\nfrom collision import *\n\n\nclass PlayGameState(GameState):\n\t\n\tdef __init__(self, game, gameOverState):\n\t\tsuper(PlayGameState, self).__init__(game)\n\t\tself.controllers = None\n\t\tself.renderers = None\n\t\tself.player_controller = None\n\t\tself.player_controller2 = None\n\t\tself.gameOverState = gameOverState\n\t\tself.initialise()\n\t\t\n\tdef onEnter(self, previousState):\n\t\tself.player_controller.pause(False)\n\n\tdef initialise(self):\n\t\t# player 1: swordsman\n\t\tself.player_controller = PlayerController(0, 540)\n\t\tplayer_renderer = PlayerView(self.player_controller, 'swordsman.png')\n\t\tlives_renderer = PlayerLivesView(self.player_controller, 'swordsman.png')\n\t\tbullet_renderer = BulletView(self.player_controller.bullets, 'sword.png')\n\t\talienbullet_renderer = BulletView(self.player_controller.bullets, 'fireball.png')\n\n\t\t# player 2: mage\n\t\tself.player_controller2 = PlayerController(0, 540)\n\t\tplayer2_renderer = Player2View(self.player_controller2, 'mage.png')\n\t\tlives2_renderer = Player2LivesView(self.player_controller2, 'mage.png')\n\t\tbullet2_renderer = BulletView(self.player_controller2.bullets, 'mage.png')\n\t\talienbullet2_renderer = BulletView(self.player_controller2.bullets, 'fireball.png')\n\n\t\texplosion_controller = ExplosionController(self.game)\n\t\tcollision_controller = CollisionController(self.game, self.player_controller2, self.player_controller, explosion_controller, self)\n\n\t\texplosion_view = ExplosionView(explosion_controller.list.explosions, 'explosion.png', 32, 32)\n\n\t\tself.renderers = [ bullet_renderer, lives_renderer, player_renderer, alienbullet_renderer, player2_renderer, lives2_renderer, bullet2_renderer, alienbullet2_renderer, explosion_view ]\n\t\tself.controllers = [ self.player_controller2, self.player_controller, collision_controller, explosion_controller ]\n\n\tdef update(self, gameTime):\n\t\tfor ctrl in self.controllers:\n\t\t\tctrl.update(gameTime)\n\t\t\t\n\t\tif ( self.player_controller.model.lives == 0 ):\n\t\t\tself.game.changeState( self.gameOverState )\n\t\t\t\n\t\tif ( self.player_controller2.model.lives == 0 ):\n\t\t\t# show the win message, then move on to the game-over state\n\t\t\tself.game.changeState( InterstitialState( self.game, 'player 1 wins ', 2000, self.gameOverState ) )\n\n\tdef draw(self, surface):\n\t\tfor view in self.renderers:\n\t\t\tview.draw(surface)\n","repo_name":"ziispawn/nikita-s-final","sub_path":"invadersgame.py","file_name":"invadersgame.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"19033238479","text":"# # Python3 program to reverse a string\n# # s = input()\n# s = \"i like this program very much\"\n# words = s.split(' ')\n# string = []\n# for word in words:\n\n#     print(word);\n\n#     string.insert(0, word);\n\n\n# print(\" \".join(string))\n\n# # Solution proposed by Uttam\n\n\ndef reverseWords(s):\n\n    # splitting the string\n    s = s.split(\" \")\n\n    # initializing 2 pointers\n    left = 0\n    right = len(s) - 1\n\n    # traversing till the left does not exceed the right pointer\n    while left <= right:\n        # swap elements\n        s[left], s[right] = s[right], s[left]\n\n        # increment left and decrement right\n        left += 1\n        right -= 1\n\n    s = \" \".join(s)\n    return s\n\ns = 'I love Programming bottom of My Heart'\nprint(reverseWords(s))\n\n","repo_name":"priya597/python_practice","sub_path":"reversString.py","file_name":"reversString.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"34032358374","text":"#pdfplumber\nimport pdfplumber\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\n\n\npdf = pdfplumber.open(\"tdostats.pdf\")\n\nloc_columns = [\"Month_Year\", \"Total_Events\"]\n\ndef get_total(doc):\n    total_rows = []\n    year_counter = 2014\n    select_pages = [1, 2, 3, 4, 5, 7, 10, 13]\n    doc_pages = list(doc.pages[i] for i in select_pages)\n    for page in doc_pages:\n        table_first_half = page.extract_table()[-13:-7]\n        table_second_half = page.extract_table()[-7:-1]\n        if year_counter < 2016:\n            for row in table_first_half:\n                total_rows.append([row[0] + \" \" + str(year_counter), row[6]])\n            for row in table_second_half:\n                total_rows.append([row[0] + \" \" + str(year_counter + 1), row[6]])\n        else:\n            for row in table_first_half:\n                total_rows.append([row[0] + \" \" + str(year_counter), row[7]])\n            for row in table_second_half:\n                total_rows.append([row[0] + \" \" + str(year_counter + 1), row[7]])\n        year_counter += 1\n    return total_rows\n\ndef create_table(rows, cols):\n    df = pd.DataFrame(rows, columns=cols)\n    df[\"Total_Events\"] = df[\"Total_Events\"].apply(pd.to_numeric)\n    df[\"Month_Year\"] = pd.to_datetime(df[\"Month_Year\"], format=\"%B %Y\")\n    df = df.set_index(df[\"Month_Year\"])\n    df = df.sort_index()\n    return df\n\nloc_df = create_table(get_total(pdf), loc_columns)\n\nprint(loc_df)\n\nplt.style.use('seaborn-dark')\nplt.plot(loc_df['Total_Events'])\nplt.ylabel('Events Per Month', fontsize=12.0)\nplt.title('Loss of Custody Events', fontsize=16.0, pad=10.0)\nplt.gcf().autofmt_xdate()\nplt.tight_layout(pad=2.0)\nplt.show()\n\n\n\n","repo_name":"gam32bit/tdo","sub_path":"tdo_tables.py","file_name":"tdo_tables.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"26987003673","text":"#!/usr/bin/python\n# -*- coding: utf-8 
-*-\nimport json\nimport re\nfrom modulospy import baserequest\nfrom modelos import todosmodelos\nimport imgs3 as Img\nimport unidecode\n\ndef slugify(str):\n str = unidecode.unidecode(str).lower()\n return re.sub(r'\\W+','-',str)\n\nclass clunotro(baserequest.respuesta):\n\t@baserequest.user_required_json_tienda\n\tdef post(self):\n\t\tif self.objjson.has_key(\"padre\") and self.objjson.has_key(\"datos\") and type(self.objjson[\"datos\"]) == list:\n\t\t\ttry:\n\t\t\t\tidpadre=int(self.objjson[\"padre\"])\n\t\t\texcept:\n\t\t\t\tself.errornor(\"idpadre erróneo %s\" % self.objjson[\"padre\"])\n\t\t\t\treturn\n\t\t\tdatos=self.objjson[\"datos\"]\n\t\t\tancestor_key= self.session['usuario'].tienda\n\t\t\tpadre=todosmodelos.Otros.get_by_id(idpadre,parent=ancestor_key)\n\t\t\tif not padre:\n\t\t\t\tself.errornor(\"idpadre no existe\")\n\t\t\t\treturn\n\t\t\tkpadre=padre.key\n\t\telse:\n\t\t\tself.errornor(\"padre erróneo\")\n\t\t\treturn\n\t\tope=self.objjson[\"ope\"]\n\t\tif ope==\"ins\":\n\t\t\tva=[\"nombre\",\"descrip\"]\n\t\t\tta=[80,200]\n\t\t\tself.devol=[]\n\t\t\tself.numd=0\n\t\t\tfor cam in datos:\n\t\t\t\tif not self.existeen([\"nombre\",\"descrip\",\"precio\",\"grupo\"],cam) or not self.loncadeen(va,ta,cam):\n\t\t\t\t\tself.errorcomp(\"error en campos\")\n\t\t\t\t\treturn\n\t\t\t\ttry:\n\t\t\t\t\tpre=float(cam[\"precio\"])\n\t\t\t\t\tgr=cam[\"grupo\"][:80]\n\t\t\t\texcept: # ValueError as e:\n\t\t\t\t\tself.errorcomp(\"precio, id\") # %s\" % e)\n\t\t\t\t\treturn\n\t\t\t\tuot= todosmodelos.UnOtros(parent=kpadre,nombre=cam[\"nombre\"],descrip=cam[\"descrip\"],grupo=gr,precio=pre)\n\t\t\t\tk=uot.put()\n\t\t\t\tif k:\n\t\t\t\t\tself.devol.append([k.integer_id(), k.urlsafe()])\n\t\t\t\t\tself.numd+=1\n\t\t\t\t\t#self.ok(k.integer_id(), k.urlsafe())\n\t\t\t\telse:\n\t\t\t\t\tself.errorcomp(\"no se pudo grabar.\")\n\t\t\tself.ok(self.devol,1)\n\t\telif ope==\"mod\":\n\t\t\tva=[\"nombre\",\"descrip\"]\n\t\t\tta=[80,200]\n\t\t\tself.devol=[]\n\t\t\tself.numd=0\n\t\t\tfor cam in datos:\n\t\t\t\tif not self.existeen([\"id\",\"nombre\",\"descrip\",\"precio\",\"grupo\"],cam) or not self.loncadeen(va,ta,cam):\n\t\t\t\t\tself.errorcomp(\"error en campos\")\n\t\t\t\t\treturn\n\t\t\t\ttry:\n\t\t\t\t\tpre=float(cam[\"precio\"])\n\t\t\t\t\tgr=cam[\"grupo\"][:80]\n\t\t\t\t\tid=int(cam[\"id\"])\n\t\t\t\texcept: # ValueError as e:\n\t\t\t\t\tself.errorcomp(\"precio, id\") # %s\" % e)\n\t\t\t\t\treturn\n\t\t\t\tx= todosmodelos.UnOtros.get_by_id(id,parent=kpadre)\n\t\t\t\tif not x:\n\t\t\t\t\tself.errorcomp(\"No se puede encontrar id\")\n\t\t\t\t\treturn\n\t\t\t\tx.populate(nombre=cam[\"nombre\"],descrip=cam[\"descrip\"],grupo=gr,precio=pre)\n\t\t\t\tk=x.put()\n\t\t\t\tif k:\n\t\t\t\t\tself.devol.append(k.integer_id())\n\t\t\t\t\tself.numd+=1\n\t\t\t\telse:\n\t\t\t\t\tself.errorcomp(\"no se pudo grabar\")\n\t\t\tself.ok(self.devol,1)\n\t\telif ope==\"del\":\n\t\t\tlids=[]\n\t\t\tfor i in datos:\n\t\t\t\ttry:\n\t\t\t\t\tid=int(i)\n\t\t\t\texcept:\n\t\t\t\t\tself.response.out.write(json.dumps({\"error\":\"i=%d\" % i,\"ids\":ids}))\n\t\t\t\t\treturn\n\t\t\t\tx=False\n\t\t\t\tx= todosmodelos.UnOtros.get_by_id(id,parent=kpadre)\n\t\t\t\tif x:\n\t\t\t\t\tif x.keyimagen:\n\t\t\t\t\t\timg=x.keyimagen.get()\n\t\t\t\t\t\timg.puntero-=1\n\t\t\t\t\t\tif img.puntero < 1:\n\t\t\t\t\t\t\tImg.borrarImg_cloud(img)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\timg.put()\n\t\t\t\t\t\t#lids.append(x.keyimagen)\n\t\t\t\t\tlids.append(x.key)\n\t\t\t\telse:\n\t\t\t\t\tself.response.out.write(json.dumps({\"error\":\"al append 
id\",\"ids\":ids}))\n\t\t\t\t\treturn\n\t\t\tif x:\n\t\t\t\ttodosmodelos.ndb.delete_multi(lids)\n\t\t\t\tself.ok(\"ok eliminados\")\n\nclass clOtros(baserequest.respuesta):\n\t@baserequest.user_required_json_tienda\n\tdef post(self):\n\t\tif not self.objjson.has_key(\"datos\") and not type(self.objjson[\"datos\"]) == list:\n\t\t\tself.errornor(\"error en datos\")\n\t\t\treturn\n\t\tdatos=self.objjson[\"datos\"]\n\t\tancestor_key= self.session['usuario'].tienda\n\t\tope=self.objjson[\"ope\"]\n\t\tif ope==\"ins\":\n\t\t\tva=[\"nombre\",\"descrip\"]\n\t\t\tta=[80,200]\n\t\t\tself.devol=[]\n\t\t\tself.numd=0\n\t\t\tfor cam in datos:\n\t\t\t\tif not self.existeen(va,cam) or not self.loncadeen(va,ta,cam):\n\t\t\t\t\tself.errorcomp(\"error en campos\")\n\t\t\t\t\treturn\n\t\t\t\tslugurl=slugify(cam[\"nombre\"])\n\t\t\t\thayotr=todosmodelos.Otros.query(todosmodelos.Otros.url==slugurl,ancestor=ancestor_key).get()\n\t\t\t\tif hayotr:\n\t\t\t\t\tself.errorcomp(\"1.3 Nombre de otro ( %s ) no valido ya existe\" % cam[\"nombre\"])\n\t\t\t\t\treturn\n\t\t\t\tma= todosmodelos.Otros(parent=ancestor_key,nombre=cam[\"nombre\"],descrip=cam[\"descrip\"],url=slugurl)\n\t\t\t\tk=ma.put()\n\t\t\t\tif k:\n\t\t\t\t\tself.devol.append((k.integer_id(),slugurl))\n\t\t\t\t\tself.numd+=1\n\t\t\t\telse:\n\t\t\t\t\tself.errorcomp(\"no se pudo grabar.\")\n\t\t\tself.ok(self.devol,1)\n\t\telif ope==\"mod\":\n\t\t\tva=[\"nombre\",\"descrip\"]\n\t\t\tta=[80,200]\n\t\t\tself.devol=[]\n\t\t\tself.numd=0\n\t\t\tfor cam in datos:\n\t\t\t\tif not self.existeen([\"id\",\"nombre\",\"descrip\"],cam) or not self.loncadeen(va,ta,cam):\n\t\t\t\t\tself.errorcomp(\"Faltan campos\")\n\t\t\t\t\treturn\n\t\t\t\ttry:\n\t\t\t\t\tid=int(cam[\"id\"])\n\t\t\t\texcept:\n\t\t\t\t\tself.errorcomp(\"id erróneo\")\n\t\t\t\t\treturn\n\t\t\t\tx= todosmodelos.Otros.get_by_id(id,parent=ancestor_key)\n\t\t\t\tif not x:\n\t\t\t\t\tself.errorcomp(\"No se puede encontrar id\")\n\t\t\t\t\treturn\n\t\t\t\tif not x.nombre == cam[\"nombre\"]:\n\t\t\t\t\tslugurl=slugify(cam[\"nombre\"])\n\t\t\t\t\thayotr=todosmodelos.Otros.query(todosmodelos.Otros.url==slugurl,ancestor=ancestor_key).get()\n\t\t\t\t\tif hayotr:\n\t\t\t\t\t\tself.errorcomp(\"1.3 Nombre de otro ( %s ) no valido ya existe\" % cam[\"nombre\"])\n\t\t\t\t\t\treturn\n\t\t\t\t\tx.url=slugurl\n\t\t\t\t\tx.nombre=cam[\"nombre\"]\n\t\t\t\telse:\n\t\t\t\t\tslugurl=cam[\"nombre\"]\n\t\t\t\tx.descrip=cam[\"descrip\"]\n\t\t\t\tk=x.put()\n\t\t\t\tif k:\n\t\t\t\t\tself.devol.append((k.integer_id(),slugurl))\n\t\t\t\t\tself.numd+=1\n\t\t\t\telse:\n\t\t\t\t\tself.errorcomp(\"no se pudo grabar\")\n\t\t\tself.ok(self.devol,1)\n\t\telif ope==\"del\":\n\t\t\tlids=[]\n\t\t\tfor i in datos:\n\t\t\t\ttry:\n\t\t\t\t\tid=int(i)\n\t\t\t\texcept:\n\t\t\t\t\tself.response.out.write(json.dumps({\"error\":\"i=%d\" % i,\"ids\":datos}))\n\t\t\t\t\treturn\n\t\t\t\tx=False\n\t\t\t\tx= todosmodelos.Otros.get_by_id(id,parent=ancestor_key)\n\t\t\t\tif x:\n\t\t\t\t\tlids.append(x.key)\n\t\t\t\telse:\n\t\t\t\t\tself.response.out.write(json.dumps({\"error\":\"al append id\",\"ids\":datos}))\n\t\t\t\t\treturn\n\t\t\t\tuo=todosmodelos.UnOtros.query(ancestor=x.key)\n\t\t\t\tfor iuo in uo: #.iter(keys_only=True):\n\t\t\t\t\tif iuo.keyimagen:\n\t\t\t\t\t\tlids.append(iuo.keyimagen)\n\t\t\t\t\tlids.append(iuo.key)\n\t\t\tif x:\n\t\t\t\ttodosmodelos.ndb.delete_multi(lids)\n\t\t\t\tself.ok(\"ok eliminados\")\n\nclass verTodos(baserequest.BaseHandler):\n\tdef get(self):\n\t\tmise = self.session.get('usuario')\n\t\tif not 
mise:\n\t\t\tself.response.out.write(json.dumps({\"error\":u\"No hay sesión de usuario\"},ensure_ascii=False))\n\t\t\treturn\n\t\tancestor_key= mise.tienda\n\t\totrosQry = todosmodelos.Otros.query(ancestor=ancestor_key).order(todosmodelos.Otros.nombre)\n\t\tlisotr=[]\n\t\tjsuotr={}\n\t\tfor i in otrosQry:\n\t\t\tid=i.key.id()\n\t\t\tlisotr.append([id,i.nombre,i.descrip,\"Listar\",i.url])\n\t\t\tuoQry=todosmodelos.UnOtros.query(ancestor=i.key).order(todosmodelos.UnOtros.grupo)\n\t\t\tauxlu=[]\n\t\t\tfor u in uoQry:\n\t\t\t\tif u.keyimagen:\n\t\t\t\t\tkimg=u.keyimagen.get()\n\t\t\t\t\tkys=(kimg.nombre,kimg.url)\n\t\t\t\telse:\n\t\t\t\t\tkys=(None,None)\n\t\t\t\tauxlu.append((u.key.id(),u.nombre,u.descrip,u.precio,u.grupo,kys,u.key.urlsafe() ))\n\t\t\tjsuotr[str(id)]=auxlu\n\t\tself.response.out.write(json.dumps({\"ok\":\"ok\",\"otros\":lisotr,\"unotros\":jsuotr},ensure_ascii=False))","repo_name":"raulmar/proyecto-pizweb","sub_path":"otros2.py","file_name":"otros2.py","file_ext":"py","file_size_in_byte":6769,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"1813854551","text":"from tkinter import *\n\nroot = Tk() # 计算器\nv1 = StringVar()\nv2 = StringVar()\nv3 = StringVar()\nv4 = StringVar()\n\n\ndef test(content):\n return content.isdigit()\n\ntestCMD = root.register(test)\ne1 = Entry(root,textvariable = v1,validate = \"key\",\\\n validatecommand = (testCMD,\"%P\")).grid(row=0, column=0)\n\n#Label(root,text = \"+\").grid(row=0, column=1)\n\ne2 = Entry(root,textvariable = v2,validate = \"key\",\\\n validatecommand = (testCMD,\"%P\")).grid(row=0, column=2)\n\nLabel(root,text = \"=\").grid(row=0, column=3)\n\ne3 = Entry(root,textvariable = v3,state = \"readonly\",\\\n validatecommand = (testCMD,\"%P\")).grid(row=0, column=4)\n\ndef calc():\n result = int(v1.get())+int(v2.get())\n v3.set(str(result))\n \n \n\n\nButton(root,text=\"计算器\",command = calc).grid(row=1, column=2)\nmainloop()\n\n'''\ndef compute(a):\n if a == 1:\n print(\"111\")\n return 1\n elif a == 2:\n print(\"222\")\n return 2\n elif a == 3:\n return 2\n elif a == 4:\n return 4\n else:\n pass\n\n\nCheckbutton(root,text= \"➕\",variable = v4 ,command = compute(1)).grid(row=0, column=1)\nCheckbutton(root,text= \"➖\",variable = v4 ,command = compute(2)).grid(row=1, column=1)\nCheckbutton(root,text= \"✖\",variable = v4 ,command = compute(3)).grid(row=2, column=1)\nCheckbutton(root,text= \"➗\",variable = v4 ,command = compute(4)).grid(row=3, column=1)\n\ndef calc(b):\n if b == 1:\n result = int(v1.get())+int(v2.get())\n v3.set(str(result))\n elif b == 2:\n result = int(v1.get())-int(v2.get())\n v3.set(str(result))\n elif b == 3:\n result = int(v1.get())*int(v2.get())\n v3.set(str(result))\n elif b == 4:\n result = int(v1.get())/int(v2.get())\n v3.set(str(result))\n else:\n pass\n\n'''","repo_name":"tangmessi/pycharm_git_practice","sub_path":"tkinter_calc.py","file_name":"tkinter_calc.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"20196956778","text":"# 재귀 알고리즘(recursive algorithms): 하나의 함수에서 자신을 다시 호출하여 작업을 수행하는 것\n# 재귀 알고리즘의 종결 조건: 알고리즘의 종결조건에 반드시 필요\n\n# 문제: 1 부터 N 까지의 모든 자연수의 합을 구하시오.\n\n# Recursive version\ndef sum_recursive_version(n):\n if n <= 1: # 종결조건 매우 중요!\n return n\n else:\n return n + sum(n - 1)\n\n\nN = int(input('Number: '))\nprint(sum_recursive_version(N))\n\n# 알고리즘 복잡도: n 이 커지면 n 에 따라서 함수를 호출해야하는 횟수가 증가하기 때문에 n 에 비례하는 복잡도를 가짐, O(n)\n# 알고리즘 효율성: iterative 
version 보다 효율성이 떨어짐\n\n# Iterative version\n\n\ndef sum_iterative_version(n):\n s = 0\n while n >= 0:\n s += n\n n -= 1\n return s\n\n\nn = int(input('Number: '))\nprint(sum_iterative_version(n))\n\n# 알고리즘 복잡도: n 이 커지면 n 에 따라서 순환문 반복 횟수가 증가하기 때문에 n 에 비례하는 복잡도를 가짐, O(n)\n# 알고리즘 효율성: recursive version 보다 효율성이 좋음\n\n\ndef sum_good_version(n):\n return n * (n + 1) // 2\n\n# 알고리즘 복잡도: O(1)\n\n# 재귀 알고리즘 추가 예제\n# 1 부터 n 까지의 모든 자연수를 곱하는 문제이며 n! 을 구하는 문제\n\n\ndef what(n):\n if n <= 1:\n return 1\n else:\n return n * what(n - 1)\n\n# 피보나치 순열 구현하기\n\n# Recursive version\n\n\ndef solution_recursive_version(x):\n answer = 0\n if x < 2:\n return x\n else:\n answer = solution_recursive_version(\n x - 1) + solution_recursive_version(x - 2)\n\n return answer\n\n# Iterative version\n\n\ndef solution_iterative_version(x):\n answer = 0\n fa = 0\n fb = 1\n while x > 0:\n x -= 1\n fa, fb = fb, fa+fb\n answer = fa\n return answer\n","repo_name":"kthdd1234/programmers_datastructure-algorithm_basic-lecture","sub_path":"recursive algorithms.py","file_name":"recursive algorithms.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"29948002190","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 3 20:22:27 2021\n\n@author: Admin\n\"\"\"\n\n#ism = \"Samandar\"\n\n#print('mening ismim '+ism)\n\n\n\n#a=4\n#b = 5\n\n#print((a+b)**2)#darajaga ko'tarish amali\n\n#o'zgaruvchi nomida provel tashlash mumkin emas\n #o'zgaruvchi 2 ta so'zdan iborat bo'lsa unda pastki chiziqcha ishlatish mumkin\n \nradi=4\npi=3.14\naylana=radi*pi*2\nprint(aylana)","repo_name":"yagodeveloper/Python-amalyotlari","sub_path":"yago2.py","file_name":"yago2.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"70993592757","text":"import matplotlib.pyplot as plt\n\nx = list(range(1, 1001))\ny = [x**2 for x in range(1, 1001)]\n\nplt.scatter(x, y, c=y, cmap=plt.cm.Blues, edgecolors='none', s=40)\nplt.title(\"Suqare Numbers\", fontsize=24)\nplt.xlabel(\"Value\", fontsize=14)\nplt.ylabel(\"Square Value\", fontsize=14)\nplt.tick_params(axis='both', which='major', labelsize=14)\n\nplt.axis([0, 1100, 0, 1100000])\n# plt.show()\nplt.savefig(\"squares_plot.png\", bbox_inches=\"tight\")","repo_name":"buzzzzx/data_visualization_easy","sub_path":"matplotlib_test/scatter_squares.py","file_name":"scatter_squares.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"19472125367","text":"from copy import deepcopy\n\nimport numpy as np\nimport tensorflow as tf\nfrom graph_attribution.featurization import MolTensorizer, smiles_to_graphs_tuple\nfrom graph_attribution.graphs import get_graphs_tf, get_num_graphs\nfrom graph_nets.utils_tf import data_dicts_to_graphs_tuple\nfrom rdkit.Chem import AllChem, DataStructs, MolFromSmiles\nfrom tqdm import tqdm\n\nfrom xaibench.color_utils import get_batch_indices\nfrom xaibench.train_gnn import DEVICE\n\nFP_SIZE = 1024\nBOND_RADIUS = 2\n\n\ndef gen_dummy_atoms(mol, dummy_atom_no=47):\n \"\"\"\n Given a specific rdkit mol, returns a list of mols where each individual atom\n has been replaced by a dummy atom type.\n \"\"\"\n mod_mols = []\n\n for idx_atom in range(mol.GetNumAtoms()):\n mol_cpy = deepcopy(mol)\n mol_cpy.GetAtomWithIdx(idx_atom).SetAtomicNum(dummy_atom_no)\n mod_mols.append(mol_cpy)\n return mod_mols\n\n\ndef 
featurize_ecfp4(mol, fp_size=FP_SIZE, bond_radius=BOND_RADIUS):\n \"\"\"\n Gets an ECFP4 fingerprint for a specific rdkit mol. \n \"\"\"\n fp = AllChem.GetMorganFingerprintAsBitVect(mol, bond_radius, nBits=fp_size)\n arr = np.zeros((1,), dtype=np.float32)\n DataStructs.ConvertToNumpyArray(fp, arr)\n return arr\n\n\ndef pred_pairs(pair_df, model, batch_size=16):\n tensorizer = MolTensorizer()\n\n g_i, g_j = (\n smiles_to_graphs_tuple(pair_df[\"smiles_i\"], tensorizer),\n smiles_to_graphs_tuple(pair_df[\"smiles_j\"], tensorizer),\n )\n preds_diff = []\n\n n = get_num_graphs(g_i)\n indices = get_batch_indices(n, int(batch_size / 2))\n\n for idx in tqdm(indices):\n with DEVICE:\n b_i, b_j = get_graphs_tf(g_i, idx), get_graphs_tf(g_j, idx)\n pred_i, pred_j = model(b_i), model(b_j)\n pred = pred_i - pred_j\n preds_diff.extend(pred.numpy()[:, 0].tolist())\n return preds_diff\n\n\ndef pred_pairs_diff(pair_df, model, mol_read_f=MolFromSmiles):\n preds_diff = []\n\n for row in tqdm(pair_df.itertuples(), total=len(pair_df)):\n sm_i, sm_j = getattr(row, \"smiles_i\"), getattr(row, \"smiles_j\")\n mol_i, mol_j = mol_read_f(sm_i), mol_read_f(sm_j)\n fp_i, fp_j = featurize_ecfp4(mol_i), featurize_ecfp4(mol_j)\n pred_i, pred_j = (\n model.predict(fp_i[np.newaxis, :]).squeeze(),\n model.predict(fp_j[np.newaxis, :]).squeeze(),\n )\n pred = pred_i - pred_j\n preds_diff.append(pred)\n return preds_diff\n\n\ndef diff_mask(\n mol_string,\n pred_fun,\n fp_size=1024,\n bond_radius=2,\n dummy_atom_no=47,\n mol_read_f=MolFromSmiles,\n):\n \"\"\"\n Given a mol specified by a string (SMILES, inchi), uses Sheridan's method (2019)\n alongside an sklearn model to compute atom attribution.\n \"\"\"\n mol = mol_read_f(mol_string)\n og_fp = featurize_ecfp4(mol, fp_size, bond_radius)\n\n og_pred = pred_fun(og_fp[np.newaxis, :]).squeeze()\n\n mod_mols = gen_dummy_atoms(mol, dummy_atom_no)\n\n mod_fps = [featurize_ecfp4(mol, fp_size, bond_radius) for mol in mod_mols]\n mod_fps = np.vstack(mod_fps)\n mod_preds = pred_fun(mod_fps).squeeze()\n return og_pred - mod_preds\n\n\ndef gen_masked_atom_feats(og_g):\n \"\"\" \n Given a graph, returns a list of graphs where individual atoms\n are masked.\n \"\"\"\n masked_gs = []\n for node_idx in range(og_g[0][\"nodes\"].shape[0]):\n g = deepcopy(og_g)\n g[0][\"nodes\"][node_idx] *= 0.0\n masked_gs.append(g[0])\n return masked_gs\n\n\ndef diff_gnn(smiles, model):\n \"\"\" \n Given a SMILES string, uses Sheridan's method (2019) alongside\n a trained GNN model to compute atom attribution.\n \"\"\"\n tensorizer = MolTensorizer()\n\n og_g = tensorizer.transform_data_dict([smiles])\n masked_gs = gen_masked_atom_feats(og_g)\n\n og_gt = data_dicts_to_graphs_tuple(og_g)\n gts = data_dicts_to_graphs_tuple(masked_gs)\n with DEVICE:\n og_pred = model(og_gt)\n mod_preds = model(gts)\n return tf.squeeze(og_pred - mod_preds).numpy()\n\n","repo_name":"josejimenezluna/xaibench_tf","sub_path":"xaibench/diff_utils.py","file_name":"diff_utils.py","file_ext":"py","file_size_in_byte":3961,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"4"} +{"seq_id":"2707504743","text":"# Name: Swapnanil Ray\n# Date: 05/10/2022\n\nclass Solution:\n def interpret(self, command: str) -> str:\n newStr = \"\"\n for i in range(len(command)):\n if command[i] == \"G\":\n newStr = newStr + \"G\"\n elif command[i] == \"(\":\n if command[i + 1] == \")\":\n newStr = newStr + \"o\"\n i = 2\n elif command[i + 1] == \"a\":\n newStr = newStr + \"al\"\n i = 4\n return 
newStr\n","repo_name":"tanyagupta0201/LeetCode-Problems-Solutions","sub_path":"PYTHON/1678_GoalParserInterpretation.py","file_name":"1678_GoalParserInterpretation.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"4"} +{"seq_id":"13625760","text":"from p5 import *\nclass star:\n r = 16\n\n def __init__(self):\n self.x = random_uniform(width/2, -width/2)\n self.y = random_uniform(height/2, -height/2)\n self.z = random_uniform(width, 0)\n self.pz = self.z\n self.px = self.x\n self.py = self.y\n\n def update(self, speed):\n self.z -= speed\n if self.z < 1:\n self.x = random_uniform(width/2, -width/2)\n self.y = random_uniform(height/2, -height/2)\n self.z = width\n self.pz = self.z\n self.px = self.x\n self.py = self.y\n\n\n\n def show(self):\n fill(255)\n no_stroke()\n\n sx = remap(self.x / self.z, (0, 1), (0, width))\n sy = remap(self.y / self.z, (0, 1), (0, height))\n sr = remap(self.z, (0, width), (self.r, 0))\n\n circle((sx, sy), sr)\n\n # px = remap(self.x / self.pz, (0, 1), (0, width))\n # py = remap(self.y / self.pz, (0, 1), (0, height))\n\n\n self.pz = self.z\n\n stroke(255)\n if sx != self.px:\n try:\n line((sx, sy), (self.px, self.py))\n except ValueError:\n print(\"Value Error:\", self.px, sx) # px and sx are the same value. sx is the same value twice in a row breaking line\n\n self.px = sx\n self.py = sy\n","repo_name":"HayaiAlex/Warp-Speed","sub_path":"star.py","file_name":"star.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"25588004610","text":"##Remove os itens repetidos da lista\n\ndef remover(lista):\n L = []\n for n in lista:\n if n not in L:\n L.append(n)\n return L\n\nlista = [1, 1, 1, 1,3, 5, 6, 7] \n\nlista = remover(lista)\nprint (lista)\n\n\n\n\n","repo_name":"cristianosantosoliveira/Python","sub_path":"Aula 02/Exercicio 01.py","file_name":"Exercicio 01.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"33647908957","text":"from sqlite3 import *\n\ndef create_database():\n with connect('architecture.db') as db:\n cursor = db.cursor()\n querys = [\"\"\" create table property_developer(name_company text not null primary key, adress_main_office text, hotline_phone_number text)\"\"\",\n\t\t\"\"\" create table building(id_building integer primary key autoincrement not null, adress_building text, type_building text, name_company text not null, foreign key (name_company) references property_developer(name_company))\"\"\", \n\t\t\"\"\" create table builder(id_builder integer primary key autoincrement, full_name text, salary integer,id_building integer not null, foreign key (id_building) references building(id_building))\"\"\"]\n for i in querys:\n cursor.execute(i)\n\n\ndef input_data():\n with connect('architecture.db') as db: \n cursor = db.cursor()\n data = [\"\"\" insert into property_developer values('SSK', 'Krasnodar, Fadeeva street, 214', '88002223550')\"\"\",\n\t\t\"\"\" insert into property_developer values('NVM', 'Krasnodar, 2th-Yamalskaya street, 1', '89612121741')\"\"\",\n\t\t\"\"\" insert into property_developer values('DOSTOYANIE', 'Rostov-on-Don, Filimonovskaya, 45', '88632506007')\"\"\",\n\t\t\"\"\" insert into building(adress_building, type_building, name_company) values('Stasovo street, 213', 'residential building', 'DOSTOYANIE')\"\"\",\n\t\t\"\"\"insert into 
building(adress_building, type_building, name_company) values('Stavropolskaya street, 34', 'residential building', 'SSK')\"\"\", \n\t\t\"\"\"insert into building(adress_building, type_building, name_company) values('Old-Kubanskaya street, 123', 'residential building', 'NVM')\"\"\",\n \"\"\"insert into builder(full_name, salary, id_building) values('Ivanov Ivan Ivanovich', 36000, 1)\"\"\",\n \"\"\"insert into builder(full_name, salary, id_building) values('Sirgay Antonovich Driche', 36000, 2)\"\"\",\n \"\"\"insert into builder(full_name, salary, id_building) values('Ivanov Ivan Daichev', 30000, 3)\"\"\"]\n for i in data:\n\t cursor.execute(i) \n\n\n\ndef get_output_data_about_builder():\n with connect('architecture.db') as db:\n cursor = db.cursor()\n data = [\"\"\"select * from builder\"\"\", \"\"\"select * from property_developer\"\"\", \"\"\"select * from building\"\"\"]\n for i in data:\n cursor.execute(i)\n print(cursor.fetchall())\n\n\n\ncreate_database()\ninput_data()\n\n\n","repo_name":"SeemerGG/Python_lab","sub_path":"IT_BD/IT_BD.py","file_name":"IT_BD.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"7124923213","text":"import pandas as pd\nimport os\n\n\nparent_dir = os.path.dirname(__file__) + \"/\"\n\n#name of csv file\ndf = pd.read_csv(\"changeMe.csv\")\n\n\n#change name of title, here: \"changeMe_Task\"\nfor i in range(df[\"changeMe_Task\"].count()):\n \n count = 0\n #change name of title, here: \"ChangeMe_Difficulty\"\n challengePath = os.path.join(parent_dir, \"a\" + str(int(df.loc[i, [\"ChangeMe_Difficulty\"]])))\n\n if os.path.isdir(challengePath):\n\n for path in os.listdir(challengePath):\n if os.path.isfile(os.path.join(challengePath, path)):\n count += 1\n\n f = open(str(challengePath) + \"/\" + str(count) + \".txt\", \"w\")\n\n #change name of title, here: \"changeMe_Task\"\n f.write(str(df.loc[i][\"changeMe_Task\"]))\n f.close\n \n else:\n os.makedirs(challengePath)\n f = open(str(challengePath) + \"/\" + str(count) + \".txt\", \"w\")\n\n #change name of title, here: \"changeMe_Task\"\n f.write(str(df.loc[i][\"changeMe_Task\"]))\n f.close\n","repo_name":"AntVil/challenge-app","sub_path":"other/folderCreater.py","file_name":"folderCreater.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"2966605634","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n\nclass Solution:\n # @param {TreeNode} root\n # @return {integer[]}\n def preorderTraversal(self, root):\n \tresult = []\n \tstack = []\n\n \tif not root:\n \t\treturn result\n\n \twhile stack or root:\n \t\twhile root:\n \t\t\tresult.append(root.val)\n \t\t\tstack.append(root)\n \t\t\troot = root.left\n \t\troot = stack.pop()\n \t\troot = root.right\n\n \treturn result\n","repo_name":"JasonWayne/leetcode","sub_path":"binary_tree_preorder_traversal.py","file_name":"binary_tree_preorder_traversal.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"72194609396","text":"from .benchmark import HybridBenchmark\nfrom .config import Config\nfrom .sparql import RequestHandler\nfrom .utils import LoggingBase\nfrom .enums import ModelType, DatasetType\n\nfrom fire import Fire\n\n\ndef main(\n selection_score: float = None\n):\n 
\"\"\"\n    NOT USED?\n\n    :param selection_score:\n    :return:\n    \"\"\"\n    config = Config()\n    logger = LoggingBase(\n        config=config.logging\n    ).logger\n\n    request_handler = RequestHandler(\n        config=config,\n        logger=logger\n    )\n\n    config.run.model.type = ModelType.HYBRID_SELECTIVE_MODEL\n    config.run.dataset.type = DatasetType.MULTI_SECOND_LEVEL_ALL_BASED\n\n    if selection_score is None:\n        selection_score = 0\n    config.run.benchmark.hybrid.minimum_threshold = selection_score\n\n    model_ids = [\n        \"sentence-transformers/paraphrase-mpnet-base-v2\"\n    ]\n\n    benchmarking = HybridBenchmark(\n        config=config,\n        logger=logger,\n        request_handler=request_handler,\n        unsupervised_model_ids=model_ids,\n        supervised_model_id=\"mlflow:/ghent_words_bert_level_1\",\n        unsupervised_model_type=ModelType.EMBEDDING_REGULAR,\n        checkpoint_dir=\"data/4a939084f1c14b45a4f0a0e45a5c1864\"\n    )\n    benchmarking()\n\n\nif __name__ == \"__main__\":\n    Fire(main)\n","repo_name":"semantic-ai/probe-sparql-mono-service","sub_path":"src/run_specific_hybrid_model.py","file_name":"run_specific_hybrid_model.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"74162613878","text":"\nimport json, hmac, hashlib, time, requests, base64\nfrom requests.auth import AuthBase\nimport numpy as np\nfrom datetime import datetime\n\n\n# Create custom authentication for Exchange\nclass CoinbaseExchangeAuth(AuthBase):\n    def __init__(self, api_key, secret_key, passphrase):\n        self.api_key = api_key\n        self.secret_key = secret_key\n        self.passphrase = passphrase\n\n    def __call__(self, request):\n        timestamp = str(time.time())\n        message = timestamp + request.method + request.path_url + (request.body or '')\n        hmac_key = base64.b64decode(self.secret_key)\n        signature = hmac.new(hmac_key, message, hashlib.sha256)\n        signature_b64 = signature.digest().encode('base64').rstrip('\\n')\n\n        request.headers.update({\n            'CB-ACCESS-SIGN': signature_b64,\n            'CB-ACCESS-TIMESTAMP': timestamp,\n            'CB-ACCESS-KEY': self.api_key,\n            'CB-ACCESS-PASSPHRASE': self.passphrase,\n            'Content-Type': 'application/json'\n        })\n        return request\n\napi_url = 'https://api.pro.coinbase.com/'\n#auth = CoinbaseExchangeAuth(API_KEY, API_SECRET, API_PASS)\n\ncurrency_pairs = ['ETH-EUR', 'ETH-BTC', 'BTC-EUR']\n\nclass Arbitrage_bot:\n    def __init__(self, c_pairs):\n        \n        self.c_pair1, self.c_pair2, self.c_pair3 = c_pairs\n        self.pairs = [self.c_pair1, self.c_pair2, self.c_pair3]\n        \n        self.api_url = 'https://api.pro.coinbase.com/'\n        #self.auth = CoinbaseExchangeAuth(API_KEY, API_SECRET, API_PASS)\n        \n    def bid_ask_quotes(self):\n        \n        price_arrays = []\n\n        for x in self.pairs:\n            quotes = requests.get(api_url + '/products/{}/book'.format(x)).json()\n            price_arrays.append(np.array([[quotes[\"bids\"][0][0],quotes[\"asks\"][0][0]]], dtype=float))\n\n        return price_arrays\n\n    def cross_rate_perc(self):\n        pair1, pair2, pair3 = self.bid_ask_quotes()\n\n        cross_rate = np.multiply(pair2, pair3)\n\n        return ((cross_rate/pair1) - 1)*100, cross_rate, pair1, datetime.now().isoformat(timespec='microseconds')\n\n\ntest = Arbitrage_bot(currency_pairs)\ncount = 0\nwhile True:\n    try:\n        with open(\"test3.txt\", \"a\") as f:\n            cross_per, cross_rate, pair1, timer = test.cross_rate_perc()\n\n            f.write(\"{},{},{},{},{},{},{}\\n\".format(\n                timer,\n                cross_per[0,0],\n                cross_per[0,1],\n                cross_rate[0,0],\n                cross_rate[0,1],\n                pair1[0,0],\n                pair1[0,1],\n                )\n            )\n            count += 1\n\n    except:\n        print(\"Fuck, they got me!\")\n\n    # wait between polls so the API is not hammered\n    time.sleep(10)\n    print(count)\n\n","repo_name":"alpenmilch411/Triangular-Arbitrage","sub_path":"coinbase/data_mining.py","file_name":"data_mining.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} +{"seq_id":"5319387231","text":"from typing import List\n\n\nclass Solution:\n    def combination_sum(self, nums: List, target: int):\n        result = []\n        self.helper(nums, target, 0, [], result)\n        return result\n\n    def helper(self, nums: List, target: int, index: int, subset: List, result: List):\n        if target == 0:\n            result.append(list(subset))\n        elif target > 0 and index < len(nums):\n            self.helper(nums, target, index + 1, subset, result)\n            subset.append(nums[index])\n            self.helper(nums, target-nums[index], index, subset, result)\n            subset.pop()\n\n\nif __name__ == '__main__':\n    s = Solution()\n    ret = s.combination_sum([2, 3, 5], target=8)\n    print(ret)\n","repo_name":"pythonercbq123/leetcode","sub_path":"2023-01-30 ~ 2022-02-05/combinationsum.py","file_name":"combinationsum.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"11101811147","text":"# try and except are keywords used to handle errors and stop the program from crashing\n# first run the program you wrote and try different inputs, then check which errors appear\n# identify each error and put its name after the except keyword, as shown in this program\n# in this program, entering a string value raises a ValueError\n# so we catch it after the except keyword: the crash is avoided and we print whatever message we want to show\n# we can try all possible errors and handle each one with except, printing a message for each\ntry:\n    age=int(input(\"enter age: \"))\n    income = 20000\n    risk=income/age\n    print(f'your age is {age}')\n    print(f'your risk is {risk}')\n    \nexcept ValueError:\n    print('invalid input, try again')\nexcept ZeroDivisionError:\n    print('age cannot be zero.')\n","repo_name":"sharmaronak79/python","sub_path":"error_handling.py","file_name":"error_handling.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"4975521755","text":"import json\nfrom fastapi import APIRouter, Depends, status, HTTPException\nfrom sqlalchemy.orm import Session\nfrom app.database import get_db\nfrom .. 
import utils, models\nfrom ..schemas import team_season, team\n\nrouter = APIRouter(\n prefix='/teams',\n tags=['Team']\n)\n\n@router.post('/', status_code=status.HTTP_201_CREATED)\ndef create_team(team: team.TeamCreate, db: Session = Depends(get_db)):\n db_team = models.Team(**team.model_dump())\n db.add(db_team)\n db.commit()\n db.refresh(db_team)\n\n return db_team\n\n@router.get('/', response_model=team.TeamPlayerList)\ndef get_teams(db: Session = Depends(get_db)):\n teams = db.query(models.Team, models.Player).join(models.Player, models.Player.id == models.Team.player_id).all()\n return {\"data\": teams}\n\n@router.post('/seasons')\ndef create_team_season(team_season: team_season.TeamSeasonCreate, db: Session = Depends((get_db))):\n db_team_season = models.TeamSeason(**team_season.model_dump())\n db.add(db_team_season)\n db.commit()\n db.refresh(db_team_season)\n return db_team_season\n\n@router.get('/seasons', response_model=team_season.TeamSeasonList)\ndef get_team_seasons(db: Session = Depends(get_db)):\n team_seasons = db.query(models.TeamSeason, models.Team, models.Season).join(models.Season, models.Season.id == models.TeamSeason.season_id).join(models.Team, models.Team.id == models.TeamSeason.team_id).all()\n\n return {\"data\": team_seasons}\n\n@router.get(\"/seasons/{id}\")\ndef get_teams_seasons(id: int, db: Session = Depends(get_db)):\n team_seasons = db.query(models.TeamSeason, models.Team, models.Season).join(models.Season, models.Season.id == models.TeamSeason.season_id).join(models.Team, models.Team.id == models.TeamSeason.team_id).filter(models.Team.id == id).all()\n\n if not team_seasons:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"team with id: {id} was not found\")\n \n return team_seasons\n\n@router.post('/players')\ndef create_player_team_season(player_team_season: team_season.PlayerTeamSeasonCreate, db: Session = Depends(get_db)):\n db_player_team_season = models.PlayerTeamSeason(**player_team_season.model_dump())\n db.add(db_player_team_season)\n db.commit()\n db.refresh(db_player_team_season)\n return db_player_team_season\n\n@router.get('/players')\ndef get_season_roster(db: Session = Depends(get_db)):\n team_roster = db.query(models.PlayerTeamSeason).all()\n return team_roster\n\n@router.get('/players/{id}', response_model=team_season.TeamRosterResponse)\ndef get_players_by_team(id: int, db: Session = Depends(get_db)):\n players = db.query(models.PlayerTeamSeason, models.Player\n ).filter(models.PlayerTeamSeason.team_season_id ==id\n ).join(models.Player, models.Player.id == models.PlayerTeamSeason.player_id\n ).order_by(models.PlayerTeamSeason.is_player).all()\n return {'data': players}","repo_name":"jack9rob/leagueAPI","sub_path":"app/routers/team.py","file_name":"team.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"43334833068","text":"from . 
import room\nimport random\nimport webbrowser\nimport turtle\n\n#bteytt\ndef do():\n\tpass\n\tprint (\"Velkominn í Herbergi 31\")\n\tdo1();\n\tskapur();\n\ndef do1():\n\tlykill=0\n\tsvar1 = input(\"Má bjóða þér að svara nokkrum einföldum spurningum?\\n \")\n\tif(svar1[0]=='j'):\n\t\tsvar2 = input(\"Hvað er 10 + 9?\\n \")\n\t\tif(svar2=='19'):\n\t\t\tsvar3 = input(\"Hvað er 71 + 3696?\\n \")\n\t\t\tif(svar3=='3767'):\n\t\t\t\tsvar4 = input(\"Hvað er 9299 + 8686?\\n \")\n\t\t\t\tif(svar4=='17.985'):\n\t\t\t\t\tsvar5 = input(\"Hvað er annað nafn Kristofers Breka?\\n \")\n\t\t\t\t\tif(svar5=='Breki'):\t\n\t\t\t\t\t\twebbrowser.open(\"http://www.clker.com/cliparts/R/3/A/t/s/n/olde-key-hi.png\")\n\t\t\t\t\t\tlykill=lykill+1\n\n\ndef skapur():\t\t\t\t\t\t\n\tprint(\"Voo! Þú fannst lykill og fékkst aðgang að þessu herbergi!\")\n\tprint(\"Þú labbar inn í herbergið og þú sérð skáp og rúm\")\n\tsvarsvar1= input(\"Hvort viltu opna skápinn með lyklinnum eða kíkja undir rúmið?(opna skáp/undir rúm)\")\n\tif(svarsvar1=='opna skáp'):\n\t\tbreki = turtle.Turtle()\n\t\tbreki.speed(10)\n\t\tfor i in range(10):\n\t\t\tbreki.forward(100)\n\t\t\tbreki.right(30)\n\t\t\tbreki.forward(20)\n\t\t\tbreki.left(60)\n\t\t\tbreki.forward(50)\n\t\t\tbreki.right(30)\n\n\t\t\tbreki.penup()\n\t\t\tbreki.setposition(0, 0)\n\t\t\tbreki.pendown()\n\t\t\t\n\t\t\tbreki.right(2)\n\t\tturtle.bye()\n\t\tturtle.bye()\n\t\tsvarsvar1=input(\"Jey! þú fannst hlutinn! labba út eða kíkja undir rúmið?(labba út/kíkja undir rúm\")\n\t\tif svarsvar1==\"labba út\":\n\t\t\tpass\n\n\tif(svarsvar1=='kíkja undir rúm'):\n\t\tprint(\"Þú gramsar undir rúminnu og finnur hlutinn úr herbergi 34!\")\n\n\n","repo_name":"Forritarar-FS/Kastali","sub_path":"pythonHus/room31.py","file_name":"room31.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"is","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"35922527308","text":"###details###\r\n#this is a bot that plays minesweeper im really lazy so use this link https://minesweeperonline.com/#beginner\r\n#at 300% zoom in on the crt only with the scroll bars in the top left corner\r\n\r\n###imports###\r\nimport pyautogui as pag\r\nimport pytesseract, cv2, time\r\nfrom pytesseract import image_to_string\r\nfrom PIL import Image\r\n\r\n\r\n\r\n###variables###\r\n#mouse cordinates\r\nmiddleBlock = (295, 545)\r\nfireFox = (533, 842)\r\ncurrentTab = (590, 762)\r\n\r\nstartLeft = 252\r\nstartRow = 317\r\n\r\ncols = []\r\n\r\ndef startClick(x, y, z):\r\n pag.click(x)\r\n pag.click(y)\r\n pag.click(z)\r\n\r\ndef rowSlicer(capture, left, row):\r\n screenshot = pag.screenshot(region=(left, row, 48, 45))\r\n screenshot.save(capture + \".png\")\r\n\r\ndef checker(counter):\r\n pytesseract.pytesseract.tesseract_cmd=r'C:\\Users\\abrown\\AppData\\Local\\Programs\\Tesseract-OCR'\r\n img = cv2.imread(str(counter) + '.png')\r\n HSV_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\r\n h,s,v = cv2.split(HSV_img)\r\n thresh = cv2.threshold(v, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]\r\n checker.txt = image_to_string(thresh, config=\"--psm 6 digits\")\r\n\r\ndef hiddenCheck(counter):\r\n im = Image.open(str(counter) + '.png')\r\n from collections import defaultdict\r\n by_color = defaultdict(int)\r\n for pixel in im.getdata():\r\n by_color[pixel] += 1\r\n hiddenCheck.hidden = by_color\r\n\r\ndef mousePos():\r\n x, y = pag.position()\r\n print(x, y)\r\n###main event###\r\nwhile True:\r\n time.sleep(3)\r\n #mousePos()\r\n #startClick(fireFox, currentTab, middleBlock)\r\n for row in 
range(0, 9):\r\n row = []\r\n counter = 0\r\n for i in range(0, 2):\r\n counter = counter + 1\r\n rowSlicer(str(counter), startLeft, startRow)\r\n startLeft = startLeft + 46\r\n counter = 0\r\n startRow = startRow + 42\r\n for i in range(0, 9):\r\n counter = counter + 1\r\n checker(counter)\r\n if checker.txt[:1] == '1':\r\n row.append('1')\r\n elif checker.txt[:1] == '2':\r\n row.append('2')\r\n elif checker.txt[:1] == '3':\r\n row.append('3')\r\n else:\r\n hiddenCheck(counter)\r\n #print(hiddenCheck.hidden)\r\n #print('**********************************************')\r\n row.append('e')\r\n cols.append(row)\r\n startLeft = 94\r\n for row in cols:\r\n print(row)\r\n #mousePos()\r\n break","repo_name":"Albert-7317/Minesweeper_Solver","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"14188588830","text":"from data_parser import parseData\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Parameters\nlearning_rate = 0.0008\nnum_steps = 500000\nbatch_size = 128\ndisplay_step = 10000\n\n# Network Parameters\nn_hidden_1 = 16 # 1st layer number of neurons\nn_hidden_2 = 16 # 2nd layer number of neurons\nn_hidden_3 = 8 # 3rd layer number of neurons\n#num_input = 5*5*5*5 # feature size\nnum_input = 5*7 # feature size\nnum_classes = 5 # 5 players\n\ndef main():\n \n #get data from source\n data, res, merlins, mostCorrects, percivals, vts, _ = parseData()\n num_train = int(len(merlins) * .9)\n data_train = np.array(data[:num_train])\n data_test = np.array(data[num_train:])\n merlins_test = merlins[num_train:]\n res_test = res[num_train:]\n X_train = data_train.reshape( (data_train.shape[0], -1) )\n Y_train = np.array(merlins[:num_train])\n Y_train = one_hot_encode(Y_train)\n X_test = data_test.reshape( (data_test.shape[0], -1) )\n Y_test = np.array(merlins_test)\n Y_test = one_hot_encode(Y_test)\n print(Y_test.shape)\n \n #tf data\n #tfdata_train = tf.data.Dataset.from_tensor_slices((X_train, Y_train))\n #iterator = tfdata_train.make_one_shot_iterator()\n #next_element = iterator.get_next()\n \n # tf Graph input\n X = tf.placeholder(\"float\", [None, num_input])\n Y = tf.placeholder(\"float\", [None, num_classes])\n\n # Store layers weight & bias\n weights = {\n 'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),\n 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n 'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),\n 'out': tf.Variable(tf.random_normal([n_hidden_3, num_classes]))\n }\n biases = {\n 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n 'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n 'b3': tf.Variable(tf.random_normal([n_hidden_3])),\n 'out': tf.Variable(tf.random_normal([num_classes]))\n }\n\n # Construct model\n logits = neural_net(X, weights, biases)\n prediction = tf.nn.softmax(logits)\n\n # Define loss and optimizer\n loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=Y))\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss_op)\n\n # Evaluate model\n correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n # Initialize the variables (i.e. 
assign their default value)\n init = tf.global_variables_initializer()\n\n # Start training\n accs = []\n ts = []\n losses = []\n print(\"Starting training\")\n print(\"hidden 1 size: \" + str(n_hidden_1))\n print(\"hidden 2 size: \" + str(n_hidden_2))\n print(\"hidden 3 size: \" + str(n_hidden_3))\n print(\"batch size: \" + str(batch_size))\n print(\"iterations: \" + str(num_steps))\n with tf.Session() as sess:\n\n # Run the initializer\n sess.run(init)\n\n for step in range(1, num_steps+1):\n #batch_x, batch_y = mnist.train.next_batch(batch_size)\n ridx = np.random.randint(num_train, size=batch_size)\n batch_x = X_train[ridx]\n batch_y = Y_train[ridx]\n # Run optimization op (backprop)\n sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})\n if step % display_step == 0 or step == 1:\n # Calculate batch loss and accuracy\n loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,\n Y: batch_y})\n print(\"Step \" + str(step) + \", Minibatch Loss= \" + \\\n \"{:.4f}\".format(loss) + \", Training Accuracy= \" + \\\n \"{:.3f}\".format(acc))\n\n accs.append(acc)\n ts.append(step)\n losses.append(loss)\n\n print(\"Optimization Finished!\")\n\n # Calculate accuracy for test data\n print(\"Testing Accuracy:\", \\\n sess.run(accuracy, feed_dict={X: X_test,\n Y: Y_test}))\n\n plt.figure(\"Training Accuracy\")\n plt.xlabel(\"Time Step\")\n plt.ylabel(\"Batch Accuracy\")\n #plt.ylim(0, 1)\n plt.plot(ts, accs)\n plt.savefig(\"training_acc\", bbox_inches=\"tight\")\n \n plt.figure(\"Training Loss\")\n plt.xlabel(\"Time Step\")\n plt.ylabel(\"Batch Loss\")\n plt.plot(ts, losses)\n plt.savefig(\"training_loss\", bbox_inches=\"tight\")\n\n# Create model\ndef neural_net(x, weights, biases):\n # Hidden fully connected layer \n layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n # Hidden fully connected layer \n layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n # Hidden fully connected layer \n layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])\n # Output fully connected layer with a neuron for each class\n out_layer = tf.matmul(layer_3, weights['out']) + biases['out']\n return out_layer\n\ndef one_hot_encode(y):\n new_y = np.zeros( (y.shape[0], num_classes) )\n for i in range(y.shape[0]):\n new_y[i][y[i]] = 1\n return new_y\n\n \nif __name__ == \"__main__\":\n main()\n","repo_name":"chuchro3/AvalonAssassin","sub_path":"nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":5282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"31557800559","text":"class Split_Text_File:\r\n def __init__(self, file):\r\n self.file_to_split = file\r\n\r\n def create_files(self):\r\n lines = 10000\r\n small_file = None\r\n\r\n with open(self.file_to_split, mode='r', encoding='utf-8') as big_file:\r\n for line_num, line in enumerate(big_file):\r\n if line_num % lines == 0:\r\n if small_file:\r\n small_file.close()\r\n small_filename = f'Smaller Files\\end_line_{line_num + lines}.txt'\r\n small_file = open(small_filename, \"w\", encoding='utf-8')\r\n small_file.write(line)\r\n if small_file:\r\n small_file.close()\r\n\r\nif __name__ ==\"__main__\":\r\n test = Split_Text_File('Data\\YoutubeComment.txt')\r\n test.create_files()\r\n","repo_name":"GarMoore18/YouTube-Comment-Sentiment-Analysis","sub_path":"Split_Text_File.py","file_name":"Split_Text_File.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} 
+{"seq_id":"20434976050","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# coding=utf8\n\nimport logging\n\n\ndef init_log():\n logging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s %(filename)s'\n '[line:%(lineno)d] %(levelname)s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename='log.log',\n filemode='w')\n console = logging.StreamHandler()\n console.setLevel(logging.DEBUG)\n console.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\n console.setFormatter(formatter)\n logging.getLogger('').addHandler(console)\n\n\nif __name__ == '__main__':\n init_log()\n logging.info(\"A info log.\")\n logging.debug(\"A debug log.\")\n","repo_name":"pengyuwei/learning-backend","sub_path":"python/base/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"28865051226","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 1 00:00:12 2020\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport os\r\nimport random\r\nimport math\r\nfrom sklearn.decomposition import PCA\r\nfrom scipy import io\r\n\r\nimport multiprocessing as mp\r\nimport time\r\n\r\n\r\ncores=os.cpu_count()\r\n#%%\r\n\r\n\r\nTrial_num={'s4t4d3': 299749, 's4t4d6': 304624, 's4t8d3': 321384, 's4t8d6': 314461, 's8t4d3': 320591, 's8t4d6': 313711, 's8t8d3': 257678, 's8t8d6': 245876}\r\n\r\nchs_idx=np.load('su_coding_choose.npy')\r\n\r\ndef main():\r\n t0=time.time()\r\n p=mp.Pool(processes=cores,maxtasksperchild=1)\r\n params=[(Trial_num,chs_idx) for n in range(100)]\r\n \r\n Samples=p.map(bootstrap,params,chunksize=1)\r\n# Samples=list(p.imap(bootstrap,params,chunksize=2))\r\n p.close()\r\n p.join()\r\n# =============================================================================\r\n# Samples=[]\r\n# for n in range(1):\r\n# Samples.append(bootstrap((Trial_num,chs_idx)))\r\n# =============================================================================\r\n \r\n Samples=np.asarray(Samples)\r\n (x,y,z)=Samples.shape\r\n# forsave=np.zeros((x,y,z))\r\n# for i in range(x):\r\n# for j in range(y):\r\n# for k in range(z):\r\n# forsave[x,y,z]=Samples[x,y,z]\r\n \r\n \r\n t1=time.time()-t0\r\n print('done at ',t1)\r\n io.savemat('C:\\\\NeuPix_data\\\\bootstrap_principal_component.mat',{'samples_of_pc':Samples})\r\n# io.savemat('C:\\\\NeuPix_data\\\\shuffles_principal_component.mat',{'shuffles_of_pc':Samples})\r\n#%% \r\ndef bootstrap(args): \r\n Path='C:\\\\welltrained'\r\n su_num=13709\r\n (trial_num,choose_idx)=args\r\n# rand_sam=[ [] for i in range(8)]\r\n samples=[ [] for i in range(8)]\r\n# counter=[0 for n in range(8)]\r\n Key=sorted(trial_num.keys())\r\n# for k in [0,1,4,5]:\r\n# rand_sam[k]=np.random.randint(0,trial_num[Key[k]]+trial_num[Key[k+2]],size=su_num)\r\n# rand_sam[k].sort() \r\n sample_fr=[] \r\n dirs=os.walk(Path)\r\n for d in dirs:\r\n print(d)\r\n if d[2]==[]:\r\n continue\r\n# os.chdir(d[0])\r\n \r\n fr_container=np.load(d[0]+'\\\\correct_trials_firingrate.npy')\r\n su=np.load(d[0]+'\\\\neuron_numbers.npy')\r\n su_list=[u[1] for u in choose_idx if u[0] in d[0]]\r\n for i,u in enumerate(fr_container):\r\n if not (str(su[i]) in su_list):\r\n continue\r\n \r\n baseline=[]\r\n for key in u.keys():\r\n base=[k[0:250] for k in u[key]]\r\n for k in base:\r\n baseline=baseline+list(k)\r\n averange=np.mean(baseline)\r\n std=np.std(baseline)\r\n if std==0:\r\n continue\r\n \r\n for k in [0,1,4,5]:\r\n 
key_container=np.concatenate((u[Key[k]],u[Key[k+2]]))\r\n rand_sam=[random.randint(0,len(key_container)-1) for num in range( random.randint(math.ceil(len(key_container)/2),len(key_container)) )]\r\n \r\n sam_list=[key_container[u_id] for u_id in rand_sam]\r\n samples[k]=sam_list\r\n \r\n \r\n# =============================================================================\r\n #for shuffle\r\n# xchg=random.sample(range(len(samples[0])),random.randint(math.ceil(len(samples[0])/2),len(samples[0])))\r\n# for t_num in xchg:\r\n# tchg=random.randint(0,len(samples[4])-1)\r\n# samples[0][t_num],samples[4][tchg]=samples[4][tchg],samples[0][t_num]\r\n# \r\n# xchg=random.sample(range(len(samples[1])),random.randint(math.ceil(len(samples[1])/2),len(samples[1])))\r\n# for t_num in xchg:\r\n# tchg=random.randint(0,len(samples[5])-1)\r\n# samples[1][t_num],samples[5][tchg]=samples[5][tchg],samples[1][t_num]\r\n# =============================================================================\r\n \r\n seq=[]\r\n for k in [0,1,4,5]: \r\n sam_list=np.asarray(samples[k]) \r\n sam_su=np.mean(sam_list,axis=0)\r\n \r\n seq=seq+[(x-averange)/std for x in sam_su]\r\n sample_fr.append(seq)\r\n #np.save('D:\\\\all_firingrate_samples_by_type.npy',all_sam) \r\n \r\n container=[]\r\n for u in sample_fr:\r\n u=np.asarray(u)\r\n b2t=u[np.r_[0:650,950:1900,2200:2850,3150:4100]]\r\n \r\n container.append(b2t)\r\n container=np.asarray(container)\r\n container=container.T\r\n \r\n pca=PCA(n_components=20)\r\n pric_com=pca.fit_transform(container)\r\n \r\n return pric_com\r\n \r\nif __name__ == '__main__':\r\n main() ","repo_name":"jhonbourne/NeuroPixels2019-2020","sub_path":"bootstrap_pca(1).py","file_name":"bootstrap_pca(1).py","file_ext":"py","file_size_in_byte":4765,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"72662612597","text":"import os\nimport json\nimport mock\n\nfrom tornado.testing import AsyncHTTPTestCase\nfrom tornado.web import Application\nfrom tornado.escape import json_encode\n\nfrom arteria.web.state import State\n\nfrom checksum.app import routes\nfrom checksum import __version__ as checksum_version\nfrom checksum.checksum_handlers import StartHandler\nfrom checksum.runner_service import RunnerService\nfrom tests.test_utils import DummyConfig\n\n\nclass TestChecksumHandlers(AsyncHTTPTestCase):\n\n API_BASE = \"/api/1.0\"\n\n runner_service = RunnerService()\n\n def get_app(self):\n return Application(\n routes(\n config=DummyConfig(),\n runner_service=self.runner_service))\n\n ok_runfolder = \"tests/resources/ok_checksums\"\n\n\nclass TestStartHandler(TestChecksumHandlers):\n @mock.patch(\"os.path.isdir\", return_value=True)\n @mock.patch(\"os.listdir\", return_value=[\"ok_checksums\", \"rf2\", \"rf3\"])\n def test__validate_runfolder_exists_ok(self, mock_listdir, mock_isdir):\n assert StartHandler._validate_runfolder_exists(\n \"ok_checksums\", \"tests/resources/\")\n\n @mock.patch(\"os.path.isdir\", return_value=False)\n def test__validate_runfolder_exists_not_ok(self, mock_isdir):\n assert not StartHandler._validate_runfolder_exists(\n \"invalid_checksums\", \"tests/resources/\")\n\n @mock.patch(\"os.path.isfile\", return_value=True)\n def test__validate_md5sum_path_ok(self, mock_isfile):\n assert StartHandler._validate_md5sum_path(\n runfolder=TestChecksumHandlers.ok_runfolder,\n md5sum_file_path=os.path.join(\n TestChecksumHandlers.ok_runfolder, \"md5_checksums\")\n )\n\n @mock.patch(\"os.path.isfile\", return_value=True)\n def 
test__validate_md5sum_nested_path_ok(self, mock_isfile):\n nested_runfolder = \"tests/resources/ok_nested_dir/\"\n assert StartHandler._validate_md5sum_path(\n runfolder=nested_runfolder,\n md5sum_file_path=os.path.join(\n nested_runfolder, \"./md5sums/empty_file\"))\n\n @mock.patch(\"os.path.isfile\", return_value=False)\n def test__validate_md5sum_path_not_ok(self, mock_isfile):\n assert not StartHandler._validate_md5sum_path(\n runfolder=TestChecksumHandlers.ok_runfolder,\n md5sum_file_path=os.path.join(\n TestChecksumHandlers.ok_runfolder, \"no_file\"))\n\n @mock.patch(\n \"checksum.runner_service.RunnerService.start\",\n return_value=1)\n @mock.patch(\n \"checksum.checksum_handlers\"\n \".StartHandler._validate_runfolder_exists\",\n return_value=True)\n @mock.patch(\n \"checksum.checksum_handlers\"\n \".StartHandler._validate_md5sum_path\",\n return_value=True)\n @mock.patch(\n \"checksum.checksum_handlers\"\n \".StartHandler._is_valid_log_dir\",\n return_value=True)\n def test_start_checksum(\n self,\n mock_valid_log,\n mock_valid_md5sum_path,\n mock_runfolder_exists,\n mock_start,\n ):\n job_id = mock_start.return_value\n\n body = {\"path_to_md5_sum_file\": \"md5_checksums\"}\n response = self.fetch(\n self.API_BASE + \"/start/ok_checksums\",\n method=\"POST\",\n body=json_encode(body))\n\n response_as_json = json.loads(response.body)\n\n self.assertEqual(response.code, 202)\n self.assertEqual(response_as_json[\"job_id\"], job_id)\n self.assertEqual(response_as_json[\"service_version\"], checksum_version)\n\n expected_link = (\n f\"http://127.0.0.1:{self.get_http_port()}/\"\n f\"api/1.0/status/{job_id}\")\n self.assertEqual(response_as_json[\"link\"], expected_link)\n self.assertEqual(response_as_json[\"state\"], State.STARTED)\n\n def test_raise_exception_on_log_dir_problem(self):\n with mock.patch(\n \"checksum.checksum_handlers.StartHandler._is_valid_log_dir\",\n return_value=False):\n body = {\"path_to_md5_sum_file\": \"md5_checksums\"}\n response = self.fetch(\n self.API_BASE + \"/start/ok_checksums\",\n method=\"POST\",\n body=json_encode(body))\n\n self.assertEqual(response.code, 500)\n\n\nclass TestStatusHandler(TestChecksumHandlers):\n def test_check_status(self):\n with mock.patch(\n \"checksum.runner_service.RunnerService.status\",\n return_value=State.DONE) as m:\n response = self.fetch(self.API_BASE + \"/status/1\")\n response_as_json = json.loads(response.body)\n self.assertEqual(response_as_json[\"state\"], State.DONE)\n m.assert_called_once_with(1)\n\n\nclass TestStopHandler(TestChecksumHandlers):\n def test_stop_all_checksum(self):\n with mock.patch(\"checksum.runner_service.RunnerService.stop_all\") as m:\n response = self.fetch(\n self.API_BASE + \"/stop/all\", method=\"POST\", body=\"\")\n self.assertEqual(response.code, 200)\n m.assert_called_once()\n\n def test_stop_one_checksum(self):\n with mock.patch(\"checksum.runner_service.RunnerService.stop\") as m:\n response = self.fetch(\n self.API_BASE + \"/stop/1\", method=\"POST\", body=\"\")\n self.assertEqual(response.code, 200)\n m.assert_called_once_with(1)\n\n def test_version(self):\n response = self.fetch(self.API_BASE + \"/version\")\n\n expected_result = {\"version\": checksum_version}\n\n self.assertEqual(response.code, 200)\n self.assertEqual(json.loads(response.body), 
expected_result)\n","repo_name":"arteria-project/arteria-checksum","sub_path":"tests/unit_tests/test_checksum_handlers.py","file_name":"test_checksum_handlers.py","file_ext":"py","file_size_in_byte":5751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"25394865217","text":"#Reverse order.\n#Write a function that takes a string as input, and\n#returns a string similar to the input, but with the\n#words in reverse order, and the punctuation marks \n#maintaining their original order.\n#e.g;\n#f(\"Hello. I'm Edwin A.J, and you?\") => \"You and. A.J Edwin I'm, Hello?\"\n#f(\"What time is it? Hammer time.\") => \"Time Hammer? It is time what.\"\n#Note: As shown in the example above, the order of the punctuation marks('?', ',', '.') have not changed.\n#Only the words have.\n\nimport re\n\n\ndef uppercase(matchobj):\n return matchobj.group(0).upper()\n\n\ndef capitalize(s):\n return re.sub('^([a-z])|[\\.|\\?|\\!]\\s*([a-z])|\\s+([a-z])(?=\\.)', uppercase, s)\n\n\ndef reverse(string):\n \"\"\"\n Function 'reverse' takes a string as input and returns a reversed order with the punctuation marks maintaining their original order.\n \"\"\"\n if not isinstance(string, str):\n return \"Invalid input.\"\n\n last_punc = string[-1]\n if last_punc.isalpha():\n return \"Punctuate your sentence properly.\"\n\n string = string[0:-1]\n new_string = []\n last_index = [0]\n punc = []\n for index, char in enumerate(string):\n # If a space following a punctuation is observed\n if not char.isalpha() and string[index+1] == \" \":\n new_string.append(string[last_index[-1]:index])\n punc.append(string[index:index+2])\n last_index.append(index+2)\n\n new_string.append(string[last_index[-1]:])\n for index, str_ in enumerate(new_string):\n # Reverses word per portion\n new_string[index] = \" \".join(str_.split()[::-1])\n\n # Reverses string\n new_string = new_string[::-1]\n new_string[-1] = new_string[-1].lower()\n\n final = \"\"\n for i in range(len(punc)):\n final += new_string[i]\n final += punc[i]\n\n final += new_string[-1]\n final += last_punc\n return capitalize(final)\n\n\n\nif __name__ == '__main__':\n sentence = input('Enter a sentence to reverse: ')\n print(reverse(str(sentence)))\n","repo_name":"Demonohu/ECX-30-Days-Of-Code-Python-","sub_path":"day_18/reverse_order.py","file_name":"reverse_order.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"37534593047","text":"# core/mt_get_text.py\n\nfrom json import load, loads, dump, dumps\nfrom time import perf_counter_ns\nimport concurrent.futures\nfrom functools import wraps\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import NoSuchElementException, TimeoutException\nfrom core.log import log\nfrom core.chapter import Chapter, generate_book\nfrom core.base import BASE\n\n#> Declare Custom Exceptions\nclass SettingsButtonNotFound(NoSuchElementException):\n chapter: int\n msg: str = \"Unable to find settings button.\"\n\n def __init__(self, chapter: int, msg: str = \"Unable to find settings button.\"):\n self.chapter = chapter\n self.msg = msg\n\n def __repr__(self):\n return f\"SettingsButtonNotFound: Chapter {self.chapter}: {self.msg}\"\n\nclass BadWordsButtonNotFound(NoSuchElementException):\n chapter: int\n msg: str = 
\"Unable to find bad words button.\"\n\n def __init__(self, chapter: int, msg: str = \"Unable to find bad words button.\"):\n self.chapter = chapter\n self.msg = msg\n\n def __repr__(self):\n return f\"BadWordsButtonNotFound: Chapter {self.chapter}: {self.msg}\"\n\nclass ChapterTextNotFound(NoSuchElementException):\n chapter: int\n msg: str = \"Unable to find chapter text.\"\n\n def __init__(self, chapter: int, msg: str = \"Unable to find chapter text.\"):\n self.chapter = chapter\n self.msg = msg\n\n def __repr__(self):\n return f\"ChapterTextNotFound: Chapter {self.chapter}: {self.msg}\"\n\nclass ChapterTextNotFoundInTime(TimeoutException):\n chapter: int\n msg: str = \"Unable to find chapter text in allowed time.\"\n\n def __init__(\n self, chapter: int, msg: str = \"Unable to find chapter text in allowed time.\"\n ):\n self.chapter = chapter\n self.msg = msg\n\n def __repr__(self):\n return f\"ChapterTextNotFoundInTime: Chapter {self.chapter}: {self.msg}\"\n\nclass UnableToParseChapterText(Exception):\n chapter: int\n msg: str = \"Unable to parse chapter text.\"\n\n def __init__(self, chapter: int, msg: str = \"Unable to parse chapter text.\"):\n self.chapter = chapter\n self.msg = msg\n\n def __repr__(self):\n return f\"UnableToParseChapterText: Chapter {self.chapter}: {self.msg}\"\n\n#> Setup\n\n\nNUM_THREADS = 24\nchapter_dicts = []\n\n# read toc2\ndef read_toc():\n with open(\"json/toc2.json\", \"r\") as infile:\n toc = dict((load(infile)))\n return toc\n\n# Function Timer Decorator\ndef timer(*, entry: bool = True, exit: bool = True, level=\"DEBUG\"):\n \"\"\"\n A decorator that logs the entry of a function call, its exit, calculates the duration of the function and logs it.\n\n Args:\n `entry` (bool, optional): _description_. Defaults to True.\n `exit` (bool, optional): _description_. Defaults to True.\n `level` (str, optional): _description_. 
Defaults to \"DEBUG\".\n \"\"\"\n\n def wrapper(func):\n name = func.__name__\n t1 = 0\n t2 = 0\n\n @wraps(func)\n def wrapped(*args, **kwargs):\n timer_log = log.opt(depth=1)\n t1 = perf_counter()\n if entry:\n timer_log.log (level,f\"Entered {name}() at {t1})\\nargs: {args}\\nkwargs: {kwargs}\",\n )\n result = func(*args, **kwargs)\n t2 = perf_counter()\n if exit:\n timer_log.log(level, f\"Exiting {name}() @ {t2}\\nresult:\\n<{result}\"\n )\n return result\n\n duration = t2 - t1\n log.debug(f\"Function {name}() took {duration} seconds.\")\n return wrapped\n\n return wrapper\n\n\n#> Driver\n# called by get_chapter_text()\ndef browser():\n chromeoptions = webdriver.ChromeOptions().add_argument(\"--headless\")\n driver = webdriver.Chrome(options=chromeoptions)\n return driver\n\n# called by get_chapter_text()\n\ndef get_chapter_dict(chapter: int) -> dict:\n toc = read_toc()\n chapter_dict = toc[str(chapter)]\n return chapter_dict\n\n\n# called by get_chapter_text()\ndef click_settings(driver, chapter: int):\n # Wait for Settings Button to load, then click it\n try:\n settings_button = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.LINK_TEXT, \"SETTING\"))\n )\n settings_button.click()\n except NoSuchElementException:\n raise SettingsButtonNotFound(chapter)\n else:\n log.debug(f\"Chapter {chapter}: Clicked settings button.\")\n\n\n# @timer() # called by get_chapter_text()\ndef click_bad_words(driver, chapter: int):\n # Click Bad Words Button\n try:\n change_bad_words_button = driver.find_element(\n By.XPATH, '//*[@id=\"trang_doc\"]/div[6]/div[1]/div[2]/ul/li[5]/a'\n )\n change_bad_words_button.click()\n except NoSuchElementException:\n raise BadWordsButtonNotFound(chapter)\n else:\n log.debug(f\"Chapter {chapter}: Clicked bad words button.\")\n\n\n# @timer()# called by get_chapter_text()\ndef scrape_chapter_text(driver, chapter: int) -> str:\n # Wait for text to load; then get it\n try:\n text = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.ID, \"vung_doc\"))\n )\n text = driver.find_element(By.ID, \"vung_doc\")\n paragraphs = text.find_elements(By.TAG_NAME, \"p\")\n text = \"\"\n for paragraph in paragraphs:\n text = str(text + paragraph.text + \"\\n\\n\")\n\n # Strip erroneous whitespace characters\n text = text.strip()\n\n except NoSuchElementException:\n raise ChapterTextNotFound(chapter)\n except TimeoutException:\n raise ChapterTextNotFoundInTime(chapter)\n else:\n log.debug(f\"Chapter {chapter}: Found chapter text.\")\n return text\n\n\n# @timer() # called by get_chapter_text()\ndef parse_chapter_text(chapter: int, text: str) -> str:\n # Get Chapter_Dict\n chapter_dict = get_chapter_dict(chapter)\n title = chapter_dict[\"title\"]\n\n # Parse Text\n try:\n text_split = text.split(\"\\n\\n\")\n new_text_split = []\n for x, line in enumerate(text_split, start=1):\n if x == 1:\n if str(chapter) in line:\n line = \"\"\n elif title.lower() in line.lower():\n line = \"\"\n new_text_split.append(line)\n continue\n if x == 2:\n if \"Nyoi-Bo\" in line:\n line = \"\"\n elif \"nyoi-bo\" in line:\n line = \"\"\n elif str(chapter) in line:\n line = \"\"\n new_text_split.append(line)\n continue\n new_text_split.append(line)\n\n text = \"\\n\\n\".join(new_text_split)\n text = text.strip()\n except Exception:\n raise UnableToParseChapterText(chapter)\n else:\n log.debug(f\"Chapter {chapter}: Parsed chapter text.\")\n return text\n\ncount = 0\ntimer = {}\n# @timer() \ndef get_chapter_text(chapter: int) -> str:\n t1 = perf_counter_ns()\n CHAPTER = 
str(chapter)\n with open (\"json/toc2.json\", \"r\") as infile:\n toc = load(infile)\n chapter_url = toc[CHAPTER][\"url\"]\n chapter = int(toc[CHAPTER][\"chapter\"])\n chapter_title = toc[CHAPTER][\"title\"]\n driver = browser()\n driver.get(chapter_url)\n\n click_settings(driver, chapter)\n click_bad_words(driver, chapter)\n text = scrape_chapter_text(driver, chapter)\n text = parse_chapter_text(chapter, text)\n\n # Write chapter text to disk\n book = generate_book(chapter)\n chapter_zfill = str(chapter).zfill(4)\n book_zfill = str(book).zfill(2)\n filename = f\"chapter-{chapter_zfill}.txt\"\n filepath = f\"{BASE}/books/book{book_zfill}/text/{filename}\"\n\n with open(filepath, \"w\") as outfile:\n outfile.write(text)\n\n # Write chapter_dict to disk\n chapter_dict = {\n \"chapter\": chapter,\n \"title\": chapter_title,\n \"url\": chapter_url,\n \"text\": text,\n }\n with open(f\"json/chapter_dicts/chapter-{chapter_zfill}.json\", \"w\") as outfile:\n dump(chapter_dict, outfile, indent=4)\n chapter_dicts.append(chapter_dict)\n\n driver.quit()\n \n t2 = perf_counter_ns()\n elapsed_time = t2 - t1\n timer = f\"Chapter {chapter}: elapsed time: {elapsed_time}\\n\"\n with open ('json/timer.text', 'a') as outfile:\n outfile.write(timer)\n \n return chapter_dict\n\n\n# with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_THREADS) as executor:\n# futures = executor.map(get_chapter_text, chapter_gen())\n","repo_name":"maxludden/superforge","sub_path":"core/mt_get_text.py","file_name":"mt_get_text.py","file_ext":"py","file_size_in_byte":8646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"37784402653","text":"# -*- coding: utf-8 -*-\nimport datetime\nfrom gurobipy import *\nfrom itertools import product\nfrom functools import reduce\n\n\ndef flatten(model, layer, inputs):\n\n sym = inputs[layer.input.name]\n input_shape = layer.input_shape[1:]\n ndim = len(input_shape)\n assert ndim == 3, \"The dimension of flatten must be 3\"\n h, w, c = input_shape\n num = reduce(lambda x, y: x * y, input_shape)\n i = 0\n start = datetime.datetime.now()\n new_sym = model.addVars(num, lb=-GRB.INFINITY, ub=GRB.INFINITY, obj=1.0, vtype=GRB.CONTINUOUS,\n name=layer.name + \"_vars\")\n for idx in product(range(h), range(w), range(c)):\n model.addLConstr(new_sym[i] == sym[idx], name=layer.name + \"_constrs_\" + str(i))\n i += 1\n assert i == num, \"The calculation of output_shape occurs error\"\n end = datetime.datetime.now()\n print('flatten spend time:', (end - start).seconds)\n inputs[layer.output.name] = new_sym\n return inputs, model\n","repo_name":"WitnessNR/WiNR","sub_path":"layers/flatten.py","file_name":"flatten.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"8662707528","text":"import csv\nimport time\nimport StreamingAPI\nimport SearchAPI\n\nBOSTON_GEOCODE = \"42.362393,-71.062971,10km\"\nCHICAGO_GEOCODE = \"41.881832,-87.623177,10km\"\nROCKPORT_TEXAS_GEOCODE = \"28.048611,-97.041111,10km\"\nHOUSTON_TEXAS_GEOCODE = \"29.789054,-95.387083,10km\"\nMEXICO_CITY_GEOCODE = \"19.432608,-99.133209,10km\"\nMIAMI_GEOCODE = \"25.761681,-80.191788,10km\"\n\nif __name__ == '__main__':\n keywords = ['car','plane','fire','dead']\n # with open(\"chicago_tweets.csv\", 'wb') as csvfile:\n # fieldnames = ['timestamp', 'location', 'text', 'choose_one', 'choose_one:confidence']\n # writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n # 
writer.writeheader()\n #\n # for kw in keywords:\n # tweets = SearchAPI.fetch(kw, geocode=CHICAGO_GEOCODE, count=500)\n # for t in tweets:\n # writer.writerow(t)\n # csvfile.flush()\n #\n # keywords = ['water','drown','']\n # with open(\"houston_tweets.csv\", 'wb') as csvfile:\n # fieldnames = ['timestamp', 'location', 'text', 'choose_one', 'choose_one:confidence']\n # writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n # writer.writeheader()\n #\n # for kw in keywords:\n # tweets = SearchAPI.fetch(kw, geocode=HOUSTON_TEXAS_GEOCODE, count=500)\n # for t in tweets:\n # writer.writerow(t)\n\n keywords = ['evacuate','safe','stay','car','']\n with open(\"miami_tweets.csv\", 'wb') as csvfile:\n fieldnames = ['timestamp', 'location', 'text', 'choose_one', 'choose_one:confidence']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n\n for kw in keywords:\n tweets = SearchAPI.fetch(kw, geocode=MIAMI_GEOCODE, count=150)\n for t in tweets:\n writer.writerow(t)\n","repo_name":"glrn/nlp-disaster-analysis","sub_path":"twitter_api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"4"} +{"seq_id":"3364217070","text":"from orchestra.contrib.settings import Setting\nfrom orchestra.settings import ORCHESTRA_BASE_DOMAIN\n\n\nLISTS_DOMAIN_MODEL = Setting('LISTS_DOMAIN_MODEL',\n 'domains.Domain',\n validators=[Setting.validate_model_label]\n)\n\n\nLISTS_DEFAULT_DOMAIN = Setting('LISTS_DEFAULT_DOMAIN',\n 'lists.{}'.format(ORCHESTRA_BASE_DOMAIN),\n help_text=\"Uses ORCHESTRA_BASE_DOMAIN by default.\"\n)\n\n\nLISTS_LIST_URL = Setting('LISTS_LIST_URL',\n 'https://lists.{}/mailman/listinfo/%(name)s'.format(ORCHESTRA_BASE_DOMAIN),\n help_text=\"Uses ORCHESTRA_BASE_DOMAIN by default.\"\n)\n\n\nLISTS_MAILMAN_POST_LOG_PATH = Setting('LISTS_MAILMAN_POST_LOG_PATH',\n '/var/log/mailman/post'\n)\n\n\nLISTS_MAILMAN_ROOT_DIR = Setting('LISTS_MAILMAN_ROOT_DIR',\n '/var/lib/mailman'\n)\n\n\nLISTS_VIRTUAL_ALIAS_PATH = Setting('LISTS_VIRTUAL_ALIAS_PATH',\n '/etc/postfix/mailman_virtual_aliases'\n)\n\n\nLISTS_VIRTUAL_ALIAS_DOMAINS_PATH = Setting('LISTS_VIRTUAL_ALIAS_DOMAINS_PATH',\n '/etc/postfix/mailman_virtual_domains'\n)\n","repo_name":"glic3rinu/django-orchestra","sub_path":"orchestra/contrib/lists/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"4"} +{"seq_id":"17576770195","text":"import os\nimport datetime\n\nfrom src.process_mission import process_mission\nfrom src.plot_mission_cartopy import plot_mission\n\nplan_directory = './missions/test_mission_6/planner_outputs/'\n\nfor f in os.listdir(plan_directory):\n cross_track_ffor = 60 # deg\n along_track_ffor = 2 # deg\n cross_track_ffov = 0 # deg\n along_track_ffov = 0 # deg\n agility = 1 # deg/s\n num_planes = 1\n num_sats_per_plane = 5\n settings = {\n \"directory\": \"./missions/test_mission_6/\",\n \"step_size\": 1,\n \"duration\": 1,\n \"plot_interval\": 30,\n \"plot_duration\": 4/24,\n \"plot_location\": \"./missions/chrissi_results/\"+f[:-4],\n \"initial_datetime\": datetime.datetime(2020,1,1,0,0,0),\n \"grid_type\": \"static\", # can be \"event\" or \"static\"\n \"preplanned_observations\": \"./missions/test_mission_6/planner_outputs/\"+f,\n \"event_csvs\": [],\n \"plot_clouds\": False,\n \"plot_rain\": False,\n \"plot_obs\": True,\n \"cross_track_ffor\": cross_track_ffor,\n 
\"along_track_ffor\": along_track_ffor,\n \"cross_track_ffov\": cross_track_ffov,\n \"along_track_ffov\": along_track_ffov,\n \"num_planes\": num_planes,\n \"num_sats_per_plane\": num_sats_per_plane,\n \"agility\": agility,\n \"process_obs_only\": True\n }\n if not os.path.exists(\"./missions/chrissi_results/\"+f[:-4]):\n os.mkdir(\"./missions/chrissi_results/\"+f[:-4])\n process_mission(settings)\n plot_mission(settings)","repo_name":"bgorr/satplan","sub_path":"src/utils/plot_chrissi_results.py","file_name":"plot_chrissi_results.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"33123022637","text":"import math\nimport pygame as pg\n\nfrom animator import Animator\nfrom entity import Entity\n\nclass Player(Entity):\n \"\"\"Player class.\"\"\"\n\n SIZE = (64, 104)\n\n def __init__(self, world, position):\n\n super().__init__(world, \"player\", position)\n\n # self._load_images_from_sprite_sheet(\"data/images/ranger.png\") \n # self.animator = Animator(self, self.images)\n\n self.image = pg.image.load(\"data/images/ranger/idle.png\")\n\n self.hp = 36\n\n self.speed = 0.3\n\n self._prep_name_label()\n\n def update(self, frame_time):\n\n\n # print(f\"Direction: ({self.movement_direction[0]}, {self.movement_direction[1]}); Location: ({self.x}, {self.y})\\r\", end=\"\")\n\n if self.movement_direction.x or self.movement_direction.y:\n # print(f\"{self.movement_direction}\\r\", end=\"\")\n self.position += self.movement_direction.get_normalised() * self.speed * frame_time\n\n super().update()\n self.name_rect.center = self.rect.centerx, self.rect.y - 16\n\n # self.animator.update(dt)\n\n def draw(self, surface):\n surface.blit(self.image, self.rect)\n if self.world.game_instance.settings.draw_debug:\n surface.blit(self.name_surface, self.name_rect)\n pg.draw.rect(surface, (255, 0, 0), self.rect, 1)\n","repo_name":"NicolasKingreen/outrange","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"1899006499","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n# Created on 2022-07-12 13:39\r\n# Author: FATE ZHOU\r\n\r\nfrom __future__ import division, print_function # Loading modules\r\nimport time\r\nimport datetime\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom PyEMD import EMD, EEMD, CEEMDAN # CEEMDAN # pip install EMD-signal\r\nfrom sampen import sampen2 # Sample Entropy\r\nfrom vmdpy import VMD # VMD\r\n# Sklearn\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error, mean_absolute_percentage_error # R2 MSE MAE MAPE\r\nfrom sklearn.preprocessing import MinMaxScaler # Normalization\r\n# Keras\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Dense, Activation, Dropout, LSTM, GRU\r\nfrom tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping\r\n\r\n# 1.Decomposition function\r\n# ========================================================================\r\ndef ceemdan_decompose(series=None, trials=10, num_clusters = 3): # CEEMDAN Decompose\r\n decom = CEEMDAN()\r\n decom.trials = trials # Number of the white noise input\r\n df_ceemdan = pd.DataFrame(decom(series.values).T)\r\n df_ceemdan.columns = ['imf'+str(i) for i in range(len(df_ceemdan.columns))]\r\n return df_ceemdan\r\n\r\ndef 
sample_entropy(df_ceemdan=None, mm=1, r=0.1): # Sample Entropy Calculate; mm = 1 or 2; r = 0.1 or 0.2 \r\n np_sampen = []\r\n for i in range(len(df_ceemdan.columns)):\r\n sample_entropy = sampen2(list(df_ceemdan['imf'+str(i)].values),mm=mm,r=r,normalize=True)\r\n np_sampen.append(sample_entropy[1][1])\r\n df_sampen = pd.DataFrame(np_sampen, index=['imf'+str(i) for i in range(len(df_ceemdan.columns))], columns=[CODE])\r\n return df_sampen\r\n\r\ndef kmeans_cluster(df_sampen=None, num_clusters=3): # K-Means Cluster by Sample Entropy\r\n np_integrate_form = KMeans(n_clusters=num_clusters, random_state=9).fit_predict(df_sampen)\r\n df_integrate_form = pd.DataFrame(np_integrate_form, index=['imf'+str(i) for i in range(len(df_sampen.index))], columns=['Cluster'])\r\n return df_integrate_form\r\n\r\ndef integrate_imfs(df_integrate_form=None, df_ceemdan=None): # Integrate IMFs and Residue to be 3 Co-IMFs\r\n df_tmp = pd.DataFrame()\r\n for i in range(df_integrate_form.values.max()+1):\r\n df_tmp['imf'+str(i)] = df_ceemdan[df_integrate_form[(df_integrate_form['Cluster']==i)].index].sum(axis=1)\r\n df_integrate_result = df_tmp.T # Use Sample Entropy sorting the Co-IMFs\r\n df_integrate_result['sampen'] = sample_entropy(df_tmp).values\r\n df_integrate_result.sort_values(by=['sampen'], ascending=False, inplace=True)\r\n df_integrate_result.index = ['co-imf'+str(i) for i in range(df_integrate_form.values.max()+1)]\r\n df_integrate_result = df_integrate_result.drop('sampen', axis=1, inplace=False)\r\n return df_integrate_result.T\r\n\r\ndef vmd_decompose(series=None, alpha=2000, tau=0, K=10, DC=0, init=1, tol=1e-7, draw=True): # VMD Decomposition\r\n imfs_vmd, imfs_hat, omega = VMD(series, alpha, tau, K, DC, init, tol) \r\n df_vmd = pd.DataFrame(imfs_vmd.T)\r\n df_vmd.columns = ['imf'+str(i) for i in range(K)]\r\n return df_vmd \r\n\r\n# 2.Forecasting function\r\n# ========================================================================\r\ndef GRU_model(trainset_shape):# Build GRU model\r\n model = Sequential()\r\n model.add(GRU(128, input_shape=(trainset_shape[1], trainset_shape[2]), activation='tanh', return_sequences=True))\r\n model.add(Dropout(0.2))\r\n model.add(GRU(64,activation='tanh',return_sequences=True))\r\n model.add(Dropout(0.2))\r\n model.add(GRU(32,activation='tanh',return_sequences=False))\r\n model.add(Dropout(0.2))\r\n model.add(Dense(1,activation='tanh'))\r\n model.compile(loss='mse', optimizer='adam')\r\n return model\r\n \r\ndef evaluation_model(y_test, y_pred): # Model evaluation function\r\n y_test,y_pred = np.array(y_test).ravel(),np.array(y_pred).ravel()\r\n r2 = r2_score(y_test, y_pred)\r\n rmse = mean_squared_error(y_test, y_pred, squared=False) # MSE and MAE are different on different scales\r\n mae = mean_absolute_error(y_test, y_pred)\r\n mape = mean_absolute_percentage_error(y_test, y_pred)\r\n df_evaluation = pd.DataFrame({'r2': r2, 'rmse': rmse, 'mae': mae, 'mape': mape}, index = range(1))\r\n return df_evaluation\r\n\r\ndef create_train_test_set(data=None, timestep=30, co_imf_predict_for_fitting=None): # Create training set and test set with normalization\r\n if isinstance(data, pd.DataFrame): # Initialize DataFrame training set and test set\r\n dataY = data['sum'].values.reshape(-1, 1)\r\n dataX = data.drop('sum', axis=1, inplace=False)\r\n else: # Initialize Series\r\n dataY = data.values.reshape(-1, 1)\r\n dataX = dataY\r\n\r\n scalarX = MinMaxScaler(feature_range=(0,1)) # Normalize by sklearn\r\n dataX = scalarX.fit_transform(dataX)\r\n if 
co_imf_predict_for_fitting is not None: co_imf_predict_for_fitting = scalarX.transform(co_imf_predict_for_fitting)\r\n \r\n scalarY = MinMaxScaler(feature_range=(0,1))\r\n dataY = scalarY.fit_transform(dataY)\r\n \r\n trainX, trainY = [], [] # Create training set and test set\r\n for i in range(len(dataY)-timestep):\r\n trainX.append(np.array(dataX[i:(i+timestep)]))\r\n trainY.append(np.array(dataY[i+timestep]))\r\n if co_imf_predict_for_fitting is not None: # When fitting, it uses today's forecasting result \r\n if i<(len(dataY)-timestep-len(co_imf_predict_for_fitting)): trainX[i] = np.insert(trainX[i], timestep, dataX[i+timestep], 0)\r\n else: trainX[i] = np.insert(trainX[i], timestep, co_imf_predict_for_fitting[i-(len(dataY)-timestep-len(co_imf_predict_for_fitting))], 0)\r\n \r\n return np.array(trainX), np.array(trainY), scalarY\r\n\r\ndef GRU_predict(data=None, epochs=100, predict_duration=100, fitting=None): # GRU forecasting function\r\n trainX,trainY,scalarY = create_train_test_set(data, co_imf_predict_for_fitting=fitting) # Get training and test X Y\r\n x_train,x_test = trainX[:-predict_duration],trainX[-predict_duration:] # Split training and test set\r\n y_train,y_test = trainY[:-predict_duration],trainY[-predict_duration:]\r\n train_X = x_train.reshape((x_train.shape[0], x_train.shape[1], x_train.shape[2])) # Convert to tensor \r\n test_X = x_test.reshape((x_test.shape[0], x_test.shape[1], x_test.shape[2])) # Convert to tensor \r\n\r\n model = GRU_model(train_X.shape) # Build the model # Use model.summary() to show the model structure\r\n patience = epochs//10 \r\n EarlyStop = EarlyStopping(monitor='val_loss', patience=5*patience, verbose=0, mode='auto') # Early stop at small learning rate\r\n Reduce = ReduceLROnPlateau(monitor='val_loss', patience=patience, verbose=0, mode='auto') # Adaptive learning rate\r\n history = model.fit(train_X, y_train, epochs=epochs, batch_size=16, validation_split=0.1, verbose=0, shuffle=True, callbacks=[EarlyStop,Reduce]) # Train the model\r\n \r\n y_test_predict = model.predict(test_X) # Predict\r\n df_gru_evaluation = evaluation_model(y_test, y_test_predict) # Evaluate model\r\n y_test_predict = y_test_predict.ravel().reshape(-1,1) \r\n y_test_predict_result = scalarY.inverse_transform(y_test_predict) # De-normalize \r\n y_test_raw = scalarY.inverse_transform(y_test) \r\n df_predict_raw = pd.DataFrame({'raw': y_test_raw.ravel(), 'predict': y_test_predict_result.ravel()}, index=range(len(y_test_raw))) # Output\r\n df_train_loss= pd.DataFrame({'loss': history.history['loss'], 'val_loss': history.history['val_loss']}, index=range(len(history.history['val_loss'])))\r\n return df_predict_raw, df_gru_evaluation, df_train_loss\r\n\r\n# 3.Main function\r\n# ========================================================================\r\nif __name__ == '__main__':\r\n start = time.time()\r\n CODE, PATH = 'sh.000001', 'D:\\\\Stock-LSTM\\\\' # code such as 'sh.000001'\r\n\r\n # 1.Load raw data\r\n df_raw_data = pd.read_csv(PATH+CODE+'.csv', header=0, parse_dates=['date'], date_parser=lambda x: datetime.datetime.strptime(x, '%Y%m%d'))\r\n series_close = pd.Series(df_raw_data['close'].values,index = df_raw_data['date'])\r\n\r\n # 2.CEEMDAN decompose\r\n df_ceemdan = ceemdan_decompose(series_close) \r\n # df_ceemdan.plot(title='CEEMDAN Decomposition', subplots=True)\r\n\r\n # 3.Sample Entropy Calculate\r\n df_sampen = sample_entropy(df_ceemdan) \r\n # df_sampen.plot(title='Sample Entropy')\r\n\r\n # 4.K-Means Cluster by Sample Entropy\r\n df_integrate_form = 
kmeans_cluster(df_sampen) \r\n # print(df_integrate_form)\r\n\r\n # 5.Integrate IMFs and Residue to be 3 Co-IMFs\r\n df_integrate_result = integrate_imfs(df_integrate_form, df_ceemdan)\r\n # df_integrate_result.plot(title='Integrated IMFs (Co-IMFs) of CEEMDAN', subplots=True)\r\n\r\n # 6.Secondary Decompose the high-frequency Co-IMF0 by VMD\r\n df_vmd_co_imf0 = vmd_decompose(df_integrate_result['co-imf0']) # vmd decomposition (The number of dataset must be even)\r\n # df_vmd_co_imf0.plot(title='VMD Decomposition of Co-IMF0', subplots=True)\r\n\r\n # 7.Predict Co-IMF0 by matrix-input GRU\r\n time0 = time.time()\r\n df_vmd_co_imf0['sum'] = df_integrate_result['co-imf0']\r\n co_imf0_predict_raw, co_imf0_gru_evaluation, co_imf0_train_loss = GRU_predict(df_vmd_co_imf0)\r\n print('======Co-IMF0 Predicting Finished======\\n', co_imf0_gru_evaluation)\r\n time1 = time.time()\r\n print('Running time: %.3fs'%(time1-time0))\r\n # co_imf0_predict_raw.plot(title='Co-IMF0 Predicting Result')\r\n # co_imf0_train_loss.plot(title='Co-IMF0 Training Loss')\r\n\r\n # 8.Predict Co-IMF1 and Co-IMF2 by vector-input GRU\r\n co_imf1_predict_raw, co_imf1_gru_evaluation, co_imf1_train_loss = GRU_predict(df_integrate_result['co-imf1'])\r\n print('======Co-IMF1 Predicting Finished======\\n', co_imf1_gru_evaluation)\r\n time2 = time.time()\r\n print('Running time: %.3fs'%(time2-time1))\r\n # co_imf1_predict_raw.plot(title='Co-IMF1 Predicting Result')\r\n # co_imf1_train_loss.plot(title='Co-IMF1 Training Loss')\r\n\r\n co_imf2_predict_raw, co_imf2_gru_evaluation, co_imf2_train_loss = GRU_predict(df_integrate_result['co-imf2'])\r\n print('======Co-IMF2 Predicting Finished======\\n', co_imf2_gru_evaluation)\r\n time3 = time.time()\r\n print('Running time: %.3fs'%(time3-time2))\r\n # co_imf2_predict_raw.plot(title='Co-IMF2 Predicting Result')\r\n # co_imf2_train_loss.plot(title='Co-IMF2 Training Loss')\r\n\r\n # 9. 
Add 3 result to get the final forecasting result (instead fitting method )\r\n duration = 100\r\n series_add_predict_result = co_imf0_predict_raw['predict']+co_imf1_predict_raw['predict']+co_imf2_predict_raw['predict']\r\n df_add_predict_raw = pd.DataFrame({'predict': series_add_predict_result.values, 'raw': series_close[-duration:].values}, index=range(duration))\r\n df_add_evaluation = evaluation_model(series_close[-duration:],series_add_predict_result)\r\n print('======'+CODE+' Predicting Finished======\\n', df_add_evaluation)\r\n end = time.time()\r\n print('Total Running time: %.3fs'%(end-start))\r\n df_add_predict_raw.plot(title=CODE+' Predicting Result')\r\n # pd.DataFrame.to_csv(df_add_predict_raw, PATH+CODE+'_predict_output.csv')\r\n\r\n # 10.Fit 3 result to get the final forecasting result (instead adding method )\r\n \"\"\"\r\n df_co_imf_predict_raw = pd.DataFrame({'co-imf0': co_imf0_predict_raw['predict'], 'co-imf1': co_imf1_predict_raw['predict'], 'co-imf2': co_imf2_predict_raw['predict']}, index=range(len(co_imf0_predict_raw)))\r\n df_fitting_set = df_integrate_result\r\n df_fitting_set['sum'] = series_close.values\r\n df_predict_raw, df_gru_evaluation, df_train_loss = GRU_predict(df_fitting_set, fitting=df_co_imf_predict_raw)\r\n print('======'+CODE+' Predicting Finished======\\n', df_gru_evaluation)\r\n end = time.time()\r\n print('Running time: %.3fs'%(end-time3))\r\n print('Total Running time: %.3fs'%(end-start))\r\n df_predict_raw.plot(title=CODE+' Predicting Result')\r\n df_train_loss.plot(title=CODE+' Training Loss')\r\n # pd.DataFrame.to_csv(df_predict_raw, PATH+CODE+'_predict_output.csv')\r\n \"\"\"","repo_name":"FateMurphy/CEEMDAN-VMD-GRU","sub_path":"CEEMDAN_VMD_GRU_Stock.py","file_name":"CEEMDAN_VMD_GRU_Stock.py","file_ext":"py","file_size_in_byte":12079,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"4"} +{"seq_id":"26175002961","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . 
import views\n\napp_name = 'persona_app'\n\nurlpatterns = [\n path('', views.InicioView.as_view(), name='inicio'),\n path('listar-empleados/', views.ListAllEmpleados.as_view(), name=\"empleados_all\"),\n path('lista-empleados-admin/', views.ListEmpleadosAdmin.as_view(), name=\"empleados_admin\"),\n path('lista-by-area/', views.ListByAreaEmpleados.as_view(), name=\"empleados_por_departamento\"),\n path('lista-by-trabajo/', views.ListByJobEmpleado.as_view()),\n path('buscar-empleado/', views.ListEmpleadoByKword.as_view()),\n path('habilidades/', views.ListHabilidadesEmpleado.as_view()),\n path('ver-detalle-empleado/', views.EmpleadoDetailView.as_view(), name=\"ver_empleado\"),\n path('add-empleado/', views.EmpleadoCreateView.as_view(), name=\"add_empleado\"),\n path('success/', views.SuccessView.as_view(), name='correcto'),\n path('update-empleado/', views.EmpleadoUpdateView.as_view(), name='modificar_empleado'),\n path('delete-empleado/', views.EmpleadoDeleteView.as_view(), name='eliminar_empleado'),\n]","repo_name":"vidalchile/sistema-registro-empleados","sub_path":"applications/persona/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"6237338814","text":"import os\nimport glob\nfrom split_settings.tools import include\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(\n os.path.dirname(\n os.path.dirname(\n os.path.abspath(__file__)\n )\n )\n)\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ['PROJECT_SECRET_KEY']\n\nENV = os.environ.get('PROJECT_ENV', 'development')\n\nCOMPONENTS_DIR = os.path.join(BASE_DIR, 'project', 'settings', 'components')\n\nCOMPONENTS = [\n 'components/{}'.format(os.path.basename(component))\n for component in glob.glob(os.path.join(COMPONENTS_DIR, '*.py'))\n]\nENVIRONMENTS = ['environments/{}.py'.format(ENV)]\n\nSETTINGS = COMPONENTS + ENVIRONMENTS\n\ninclude(*SETTINGS)\n","repo_name":"joeyworld/drf-basic-settings","sub_path":"project/settings/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"20044632845","text":"from picamera.array import PiRGBArray\nfrom picamera import PiCamera\nfrom pymavlink import mavutil\nfrom datetime import datetime\nfrom csv import writer\nimport cv2\nimport yaml\nimport numpy as np\nimport pandas as pd\nimport pyzbar.pyzbar as pyzbar\nimport math\nimport time\n\nCAMERA_PARAMETERS_INPUT_FILE = \"cam1.yaml\"\n\nwith open(CAMERA_PARAMETERS_INPUT_FILE) as f:\n loadeddict = yaml.safe_load(f)\n mtx = loadeddict.get('camera_matrix')\n dist = loadeddict.get('dist_coeff')\n mtx = np.array(mtx)\n mtx_inv = np.linalg.inv(mtx)\n dist = np.array(dist)\n\ndef requerir_mensaje(mensaje,intervalo):\n the_connection.mav.command_long_send(the_connection.target_system, the_connection.target_component, mavutil.mavlink.MAV_CMD_SET_MESSAGE_INTERVAL, 0, mensaje, intervalo, 0, 0, 0, 0, 0) #CON ESTE COMANDO SE REQUIERE LA INFORMACION DE ALGUN MENSAJE CON FRECUENCIA EN US #245:EXTENDED_SYS_STATUS\n msg = the_connection.recv_match(type=\"COMMAND_ACK\",blocking=True)\n print()\n print(msg)\n return\n\ndef registrar(namefile,estado): #Registramos nuevas filas al archivo csv del data frame\n global i\n msg0=the_connection.recv_match(type=\"LOCAL_POSITION_NED\",blocking=True)\n 
msg=the_connection.messages['LOCAL_POSITION_NED']\n msg0=the_connection.recv_match(type=\"ATTITUDE\",blocking=True)\n yaw=msg0.yaw\n i+=1\n DATOS=[i,msg.z,msg.x,msg.y,msg.vx,msg.vy,yaw,estado]\n with open(namefile,\"a\",newline=\"\") as f:\n wo=writer(f)\n wo.writerow(DATOS)\n f.close()\n return\n\n# the_connection = mavutil.mavlink_connection('tcp:127.0.0.1:5762') # STIL LOCAL \n# the_connection = mavutil.mavlink_connection('tcp:172.31.69.224:5762') # SITL REMOTO \nthe_connection = mavutil.mavlink_connection('/dev/serial0',baud=57600) # PROTOTIPO\n\nthe_connection.wait_heartbeat()\n\n#CSV\nnow=datetime.now()\nday=now.strftime(\"%d_%m_%Hh%Mm\")\nnamefile=\"/home/pi/TIC_ORTEGA_LUIS/Datos/DATOS_F2/F2_\"+day+\".csv\"\n\nlista=[\"tiempo\",\"altitud_imu\",\"x\",\"y\",\"vx\",\"vy\",\"yaw\",\"estado\"]\ndf=pd.DataFrame(columns=lista)\ndf.to_csv(namefile, index=False) #index=False para eliminar la columna unnamed:0 que se crea \ni=0\n\nrequerir_mensaje(245,1000000)\nrequerir_mensaje(32,1000000)\nrequerir_mensaje(30,1000000)\n\n# SCANNEAR QR\n\n# inicializar la camara\ncamera = PiCamera()\ncamera.resolution = (640, 480)\ncamera.framerate = 30\nrawCapture = PiRGBArray(camera, size=(640, 480))\nfont = cv2.FONT_HERSHEY_PLAIN\n\ntime.sleep(0.1) # permitir que la cámara de encienda\n\ncenter=False\ntracker=cv2.TrackerCSRT_create()\ntracker_init=False\n\n\"\"\"\nCOMMAND_ACK {command : 511, result : 0}\n{'STABILIZE': 0, 'ACRO': 1, 'ALT_HOLD': 2, 'AUTO': 3, 'GUIDED': 4, 'LOITER': 5,\n'RTL': 6,'CIRCLE': 7, 'POSITION': 8, 'LAND': 9, 'OF_LOITER': 10, 'DRIFT': 11,\n'SPORT': 13, 'FLIP': 14,'AUTOTUNE': 15, 'POSHOLD': 16, 'BRAKE': 17, 'THROW': 18,\n'AVOID_ADSB': 19, 'GUIDED_NOGPS': 20, 'SMART_RTL': 21, 'FLOWHOLD': 22, 'FOLLOW': 23,\n'ZIGZAG': 24, 'SYSTEMID': 25, 'AUTOROTATE': 26, AUTO_RTL': 27}\n\"\"\"\n\n# mode_id=the_connection.mode_mapping()['GUIDED'] # RECOMENDADO USAR EL ID Y NO EL NOMBRE\nmode_id=4 #GUIDED\nthe_connection.mav.command_long_send(the_connection.target_system, the_connection.target_component,\n mavutil.mavlink.MAV_CMD_DO_SET_MODE, 0, 0, mode_id, 0, 0, 0, 0, 0)\nthe_connection.set_mode(mode_id)\n\nmsg = the_connection.recv_match(type='COMMAND_ACK', blocking=True)\nprint()\nprint(msg)\n\nthe_connection.mav.send(mavutil.mavlink.MAVLink_set_position_target_local_ned_message(10, the_connection.target_system,\n the_connection.target_component, mavutil.mavlink.MAV_FRAME_LOCAL_OFFSET_NED, int(0b110111111000), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\n\n\nfor frame0 in camera.capture_continuous(rawCapture, format=\"bgr\", use_video_port=True):\n img=frame0.array\n frame=cv2.undistort(img,mtx,dist)\n wimg=int(frame.shape[1]*0.5)\n himg=int(frame.shape[0]*0.5)\n decodedObjects = pyzbar.decode(frame)\n for obj in decodedObjects:\n cv2.putText(frame, str(obj.data), (50, 50), font, 3,\n (255, 0, 0), 3) \n try:\n if str(obj.data)==\"b'HELIPAD'\" and tracker_init==False:\n x,y,w,h=decodedObjects[0].rect.left,decodedObjects[0].rect.top,decodedObjects[0].rect.width,decodedObjects[0].rect.height\n cx=int(x+w/2)\n cy=int(y+h/2)\n cv2.putText(frame,\"x:\"+str(cx-wimg)+\",y:\"+str(himg-cy),(cx,cy),font,2,(255,0,0),2)\n cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),3)\n BB=(x,y,w,h) # El BB es el rectangulo que contiene el código QR con la palabra HELIPAD\n tracker.init(frame,BB)\n tracker_init=True\n registrar(namefile,\"HELIPAD\")\n else:\n print(\"QR desconocido\")\n except:\n print(\"SIN HELIPUERTO\")\n registrar(namefile,\"no existe\")\n \n if tracker_init==True:\n track_success,BB=tracker.update(frame)\n if 
track_success:\n x,y,w,h=BB[0],BB[1],BB[2],BB[3]\n cx=int(x+w/2)\n cy=int(y+h/2)\n cv2.putText(frame, \"Trackerx:\"+str(cx-wimg)+\",Trackery:\"+str(himg-cy),(cx,cy),font,3,(255,0,0),3)\n cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),3) \n rx=cx-wimg\n ry=himg-cy\n Cimg=(wimg,himg)\n Cqr=(cx,cy)\n # print(math.dist(Cimg,Cqr))\n #CENTRAMOS\n if math.dist(Cimg,Cqr)<20:\n the_connection.mav.send(mavutil.mavlink.MAVLink_set_position_target_local_ned_message(10, the_connection.target_system,\n the_connection.target_component, mavutil.mavlink.MAV_FRAME_BODY_OFFSET_NED, int(0b110111111000), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\n center=True\n tracker_init=False\n # cap.release()\n cv2.destroyAllWindows()\n registrar(namefile,\"complete!\")\n print(\"COMPLETE!\")\n break\n the_connection.mav.send(mavutil.mavlink.MAVLink_set_position_target_local_ned_message(10, the_connection.target_system,\n the_connection.target_component, mavutil.mavlink.MAV_FRAME_BODY_OFFSET_NED, int(0b110111111000), ry/300, rx/300, 0, 0, 0, 0, 0, 0, 0, 0, 0)) \n print(\"rx: \",rx,\"ry: \",ry)\n registrar(namefile,\"tracker\")\n else:\n print(\"se perdio\")\n tracker_init=False\n\n cv2.imshow(\"image \",frame)\n rawCapture.truncate(0)\n # if (cv2.waitKey(1) == ord('s')):\n if center == True or (cv2.waitKey(1) == ord('s')):\n break\ncv2.destroyAllWindows()\n\n#SET MODE\n\nmode_id=6 # RTL\nthe_connection.mav.command_long_send(the_connection.target_system, the_connection.target_component,\n mavutil.mavlink.MAV_CMD_DO_SET_MODE, 0, 0, mode_id, 0, 0, 0, 0, 0)\nthe_connection.set_mode(mode_id)\n\nmsg = the_connection.recv_match(type='COMMAND_ACK', blocking=True)\nprint()\nprint(msg)","repo_name":"GHDavid97/LUIS_ORTEGA_TIC","sub_path":"aterrizaje_F2_qrscanner.py","file_name":"aterrizaje_F2_qrscanner.py","file_ext":"py","file_size_in_byte":6954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"28191816923","text":"from random import randint\n\n#tableau\ntableau = [[\" \", \" \", \" \"],[\" \", \" \", \" \"],[\" \", \" \", \" \"]]\n\n#variable\njeu = True\nresult = 0\ntours = 0\ncase = 0\ni = randint(0, 1)\n\n#board\ndef board():\n\n print(\"\\n\\n+~~~~~+~~~~~+~~~~~+ +~~~~~+~~~~~+~~~~~+\")\n print(f\"| {tableau[0][0]} | {tableau[0][1]} | {tableau[0][2]} | | a | b | c |\")\n print(\"+~~~~~+~~~~~+~~~~~+ +~~~~~+~~~~~+~~~~~+\")\n print(f\"| {tableau[1][0]} | {tableau[1][1]} | {tableau[1][2]} | aide --> | d | e | f |\")\n print(\"+~~~~~+~~~~~+~~~~~+ +~~~~~+~~~~~+~~~~~+\")\n print(f\"| {tableau[2][0]} | {tableau[2][1]} | {tableau[2][2]} | | g | h | i |\")\n print(\"+~~~~~+~~~~~+~~~~~+ +~~~~~+~~~~~+~~~~~+\\n\")\n\n\n#check\ndef check(tableau):\n result = 0\n croix = \"X\"\n rond = \"O\"\n # lignes\n if tableau[0][0] == croix and tableau[0][1] == croix and tableau[0][2] == croix:\n result = 1\n elif tableau[0][0] == rond and tableau[0][1] == rond and tableau[0][2] == rond:\n result = 2\n elif tableau[1][0] == croix and tableau[1][1] == croix and tableau[1][2] == croix:\n result = 1\n elif tableau[1][0] == rond and tableau[1][1] == rond and tableau[1][2] == rond:\n result = 2\n elif tableau[2][0] == croix and tableau[2][1] == croix and tableau[2][2] == croix:\n result = 1\n elif tableau[2][0] == rond and tableau[2][1] == rond and tableau[2][2] == rond:\n result = 2\n # colones\n elif tableau[0][0] == croix and tableau[1][0] == croix and tableau[2][0] == croix:\n result = 1\n elif tableau[0][0] == rond and tableau[1][0] == rond and tableau[2][0] == rond:\n result = 2\n elif tableau[0][1] == 
croix and tableau[1][1] == croix and tableau[2][1] == croix:\n result = 1\n elif tableau[0][1] == rond and tableau[1][1] == rond and tableau[2][1] == rond:\n result = 2\n elif tableau[0][2] == croix and tableau[1][2] == croix and tableau[2][2] == croix:\n result = 1\n elif tableau[0][2] == rond and tableau[1][2] == rond and tableau[2][2] == rond:\n result = 2\n # diagonales\n elif tableau[0][0] == croix and tableau[1][1] == croix and tableau[2][2] == croix:\n result = 1\n elif tableau[0][0] == rond and tableau[1][1] == rond and tableau[2][2] == rond:\n result = 2\n elif tableau[0][2] == croix and tableau[1][1] == croix and tableau[2][0] == croix:\n result = 1\n elif tableau[0][2] == rond and tableau[1][1] == rond and tableau[2][0] == rond:\n result = 2\n #egalité\n elif case == 9:\n result = 3\n # partie pas finie\n else:\n result = 0\n return result\n\n\n\ndef ask():\n symbole = [\"X\", \"O\"]\n current_symbol = symbole[i % len(symbole)]\n if current_symbol == \"X\":\n print(\"Joueur 1\")\n elif current_symbol == \"O\":\n print('Joueur 2')\n reponse = input(\"quelle case voulez vous remplir ? \")\n if reponse == \"a\" and tableau[0][0] == \" \":\n tableau[0][0] = current_symbol\n elif reponse == \"b\" and tableau[0][1] == \" \":\n tableau[0][1] = current_symbol\n elif reponse == \"c\" and tableau[0][2] == \" \":\n tableau[0][2] = current_symbol\n elif reponse == \"d\" and tableau[1][0] == \" \":\n tableau[1][0] = current_symbol\n elif reponse == \"e\" and tableau[1][1] == \" \":\n tableau[1][1] = current_symbol\n elif reponse == \"f\" and tableau[1][2] == \" \":\n tableau[1][2] = current_symbol\n elif reponse == \"g\" and tableau[2][0] == \" \":\n tableau[2][0] = current_symbol\n elif reponse == \"h\" and tableau[2][1] == \" \":\n tableau[2][1] = current_symbol\n elif reponse == \"i\" and tableau[2][2] == \" \":\n tableau[2][2] = current_symbol\n else:\n print(\"cette case est deja prise\")\n\n\n#fin\ndef fin(result):\n jeu = False\n if result == 3:\n print(\"égalité\")\n elif result == 1:\n print(f\"Les croix ont gagnées !\")\n jeu = True\n elif result == 2:\n print(\"Les rond ont gagnées !\")\n jeu = True\n return jeu\n#jeu\nwhile check(tableau) == 0:\n board()\n ask()\n board()\n i += 1\n fin(check(tableau))\n tours += 1\n if tours == 9:\n print(\"égalité\")\n break\n\nprint(\"Fin du jeu\")","repo_name":"melvin-palacios/tic-tac-toe","sub_path":"Tic tac toe V1.py","file_name":"Tic tac toe V1.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"24895012035","text":"from db.base import Base, sessionFactory\nfrom db.Orm.EmployeeOrm import EmployeeOrm\nimport Room\nimport Visitor\n\nclass Employee :\n #Class variable\n jumlahEmp = 0;\n list_employee = []\n #instance\n def __init__(self, nama_emp, TL_emp, jabatan_emp, JK_emp, alamat_emp):\n self.nama_emp = nama_emp\n self.__id_emp = \"EMP\"+str(Employee.jumlahEmp+1)\n self.TL_emp = TL_emp\n self.jabatan_emp = jabatan_emp\n self.JK_emp = JK_emp\n self.alamat_emp = alamat_emp\n Employee.list_employee.append(self)\n Employee.jumlahEmp += 1\n\n #self.info = \"name {} : \\n\\t id_emp: {}\\n\\t jabatan: {}\".format(self.nama_emp, self.__id_emp, self.jabatan_emp)\n #method\n\n def getNama(self):\n return self.nama_emp\n def setNama(self) :\n new = input(\"masukkan nama baru : \")\n self.nama_emp = new\n @property\n def info(self):\n return \"name {} : \\n\\t id_emp: {}\\n\\t jabatan: {}\".format(self.nama_emp, self.__id_emp, self.jabatan_emp)\n 
@property\n def id_emp(self):\n pass\n @id_emp.getter\n def id_emp(self):\n return self.__id_emp\n @id_emp.setter\n def id_emp(self):\n new = input(\"masukkan id baru : \")\n self.__id_emp = new\n\n def getTL(self):\n return self.TL_emp\n def setTL(self):\n new = input(\"masukkan tanggal lahir baru : \")\n self.TL_emp = new\n\n def getJabatan(self):\n return self.jabatan_emp\n def setJabatan(self):\n new = input(\"masukkan jabatan baru : \")\n self.jabatan_emp = new\n\n def getJK(self):\n return self.JK_emp\n def setJK(self):\n new = input(\"masukkan jenis kelamin baru : \")\n self.JK_emp = new\n \n def getAlamat(self):\n return self.alamat_emp\n def setAlamat(self):\n new = input(\"masukkan alamat baru : \")\n self.alamat_emp = new\n\nclass Receptionist(Employee):\n list_receptionist = []\n daftar_harga = Room.Room.daftar_harga\n jumlahRec = 0\n def __init__(self, nama_emp, TL_emp, JK_emp, alamat_emp):\n super().__init__(nama_emp, TL_emp, \"Receptionist\", JK_emp, alamat_emp)\n self.tagihan = []\n Receptionist.jumlahRec += 1\n\n def menu(self):\n menu = input(\"Selamat datang, Apa yang ingin anda lakukan? \\n1. Booking\\n2. Cari Ruangan \\n3. Check Out\\n ===[>\")\n if menu == \"1\" or menu == \"Booking\" or menu == \"booking\":\n self.book()\n elif menu == \"2\" or menu == \"cari\" or menu == \"Cari\":\n self.search_room()\n elif menu == \"3\" or menu == \"checkout\" or menu == \"out\":\n self.checkOut()\n else : \n print(\"mohon masukkan kata kunci yang benar!\")\n self.menu()\n\n def book(self):\n KTP = input(\"masukkan nomor KTP anda : \")\n for i in Visitor.Visitor.list_visitor : \n if KTP == i.no_KTP :\n i.book()\n \"\"\"room_code = input(\"Masukkan tipe kamar : (N/VIP/VVIP)\")\n room_number = input(\"Masukkan nomor kamar : \")\n durasi = int(input(\"Ingin booking kamar berapa malam?\"))\n id = room_code+room_number\n if room_code == \"N\":\n harga = Receptionist.daftar_harga[0][1]\n i.tagihan.append(harga*durasi)\n Room.Room.room_list.append(Room.Room(room_number, room_code))\n i.kamar.append(id)\n print(\"booking berhasil! \\nNama\\t\\t: {}\\nTagihan \\t: {} Rupiah\".format(i.nama, i.tagihan[0]))\n elif room_code == \"VIP\":\n harga = Receptionist.daftar_harga[1][1]\n i.tagihan.append(harga*durasi)\n Room.Room.room_list.append(Room.Room(room_number, room_code))\n i.kamar.append(id)\n print(\"booking berhasil! \\nNama\\t\\t: {}\\nTagihan \\t: {} Rupiah\".format(i.nama, i.tagihan[0]))\n elif room_code == \"VVIP\":\n harga = Receptionist.daftar_harga[2][1]\n i.tagihan.append(harga*durasi)\n Room.Room.room_list.append(Room.Room(room_number, room_code))\n i.kamar.append(id)\n print(\"booking berhasil! \\nNama\\t\\t: {}\\nTagihan \\t: {} Rupiah\".format(i.nama, i.tagihan[0]))\n else:\n print(\"masukkan tipe ruangan dengan benar!\")\n self.book()\"\"\"\n else : \n print(\"data tidak ditemukan\")\n self.book()\n #id = room_code+room_number\n #self.room_list.append(Room.Room(room_number, room_code))\n #for i in self.room_list:\n # if id not in i.id_room:\n # self.room_list.append(Room.Room(room_number, room_code))\n # else : \n # print(\"room ini telah dibooking! 
coba cari yang lain\")\n # ask = input(\"ingin cari kamar lain ?(Y/N) : \")\n # if ask ==\"Y\":\n # self.book()\n # else:\n # return\n \n def search_room(self):\n kode = input(\"Masukkan tipe kamar : (N/VIP/VVIP)\")\n cari = input(\"Masukkan nomor kamar : \")\n id = kode+cari\n for i in Room.Room.room_list : \n if id == i.id_room:\n print(\"Ruangan ini telah dibooking\")\n ask = input(\"apakah anda ingin mencari ruangan lagi?(Y/N) : \")\n if ask == \"Y\":\n self.search_room()\n else:\n break\n else :\n if kode == \"N\" or kode == \"VIP\" or kode == \"VVIP\":\n print(\"Ruangan ini tersedia\")\n ask = input(\"apakah anda ingin mencari ruangan lagi?(Y/N) : \")\n if ask == \"Y\":\n self.search_room()\n else:\n break\n else : \n print(\"itu bukan kode ruangan! gunakan huruf kapital (N/VIP/VVIP)\")\n self.search_room()\n\n def checkOut(self):\n KTP = input(\"masukkan nomor KTP anda : \")\n for visitor in Visitor.Visitor.list_visitor : \n if KTP == visitor.no_KTP :\n for room in Room.Room.room_list:\n if visitor.kamar == room : \n Room.Room.room_list.remove(room)\n visitor.checkOut()\n \n\n def debook(self):\n kode = input(\"Masukkan tipe kamar : (N/VIP/VVIP)\")\n cari = input(\"Masukkan nomor kamar : \")\n id = kode+cari\n for i in Room.Room.room_list:\n if id == i.id_room:\n print(\"Terima kasih telah berkunjung\")\n Room.Room.room_list.remove(i)\n else: \n print(\"data tidak ditemukan\")\n ask = input(\"apakah anda ingin mengulanginya lagi?(Y/N) : \")\n if ask == \"Y\":\n self.checkOut()\n else:\n return\n\n def getRoom_number(self):\n return self.room_number\n def getRoom_code(self):\n return self.room_code\n \n def setRoom_number(self):\n baru = input(\"masukkan nomor ruangan baru : \")\n self.room_number = baru\n def setRoom_code(self):\n baru = input(\"masukkan kode ruangan baru : \")\n self.room_code = baru\n\n def reservation(self):\n pass\n \n #def perpanjang_book(self):\n # room_code = input(\"Masukkan tipe kamar : (N/VIP/VVIP)\")\n # room_number = input(\"Masukkan nomor kamar : \")\n # self.room_list.append(Room.Room(room_number, room_code))\n\nclass Marketing_crew(Employee):\n list_MC = []\n jumlahMC = 0\n rev_vis = Visitor.Visitor.revenue\n def __init__(self, nama_emp, TL_emp, JK_emp, alamat_emp):\n super().__init__(nama_emp, TL_emp, \"Marketing Crew\", JK_emp, alamat_emp)\n Marketing_crew.jumlahMC += 1\n\n def laporan(self):\n total_vis = Visitor.Visitor.jumlahVis\n total_emp = Employee.jumlahEmp\n revenue = Marketing_crew.rev_vis\n print(\"Jumlah pengunjung : {} \\nJumlah Karyawan : {}\\nPendapatan : {}\".format(total_vis, total_emp, revenue))\n\n def getHarga(self):\n type = input(\"Masukkan tipe ruangan(N/VIP/VVIP) : \")\n if type == \"N\" or type == \"Normal\":\n print(\"Rp.{},- per malam untuk kamar {}\".format(Receptionist.daftar_harga[0][1], Receptionist.daftar_harga[0][0]))\n ulang = input(\"ulangi lagi ?(y/n) : \")\n if ulang == \"y\" or ulang == \"ya\" or ulang == \"Y\":\n self.getHarga()\n else : \n return\n elif type == \"VIP\":\n print(\"Rp.{},- per malam untuk kamar {}\".format(Receptionist.daftar_harga[1][1], Receptionist.daftar_harga[1][0]))\n ulang = input(\"ulangi lagi ?(y/n) : \")\n if ulang == \"y\" or ulang == \"ya\" or ulang == \"Y\":\n self.getHarga()\n else : \n return\n elif type == \"VVIP\":\n print(\"Rp.{},- per malam untuk kamar {}\".format(Receptionist.daftar_harga[2][1], Receptionist.daftar_harga[2][0]))\n ulang = input(\"ulangi lagi ?(y/n) : \")\n if ulang == \"y\" or ulang == \"ya\" or ulang == \"Y\":\n self.getHarga()\n else : \n return\n else: \n 
print(\"itu bukan tipe ruangan\")\n self.getHarga()\n\n #def upd_harga(self):\n # baru = input(\"masukkan harga baru : \")\n # Room.harga = baru\n\n def setHarga(self):\n untuk = input(\"harga tipe ruangan ini akan diganti per malamnya(N/VIP/VVIP) : \")\n baru = int(input(\"masukkan harga baru : \"))\n if untuk == \"N\":\n Receptionist.daftar_harga[0][1] = baru\n elif untuk == \"VIP\":\n Receptionist.daftar_harga[1][1] = baru\n elif untuk == \"VVIP\":\n Receptionist.daftar_harga[2][1] = baru\n else : \n print(\"hanya ada 3 tipe ruangan di hotel ini yaitu N/VIP/VVIP, mohon ulangi ya\")\n self.setHarga()\n #for i in Employee.Receptionist.daftar_harga : \n # for a in i : \n # if untuk == \"N\":\nclass Cashier(Employee):\n list_cashier = []\n jumlahCashier = 0\n def __init__(self, nama_emp, TL_emp, JK_emp, alamat_emp):\n super().__init__(nama_emp, TL_emp, \"Cashier\", JK_emp, alamat_emp)\n Cashier.jumlahCashier += 1\n\n\n def receipt(self):\n KTP = input(\"masukkan nomor KTP anda : \")\n for i in Visitor.Visitor.list_visitor : \n if KTP == i.no_KTP :\n i.checkOut()\n\n#print(Visitor.Visitor.list_visitor[0].__dict__)\n#johnny = Employee(\"Johnny Walkerine\", \"19283\", \"Supervisor\", \"male\", \"Cluster\")\n#print(johnny.__dict__)\n#m1 = Marketing_crew(\"bintang\", \"California\", \"Man\", \"NYC\")\n#m1.getHarga()\n#print(Receptionist.daftar_harga)\n#rec1 = Receptionist(\"bintang\", \"california\", \"BOY\", \"NYC\")\n#print(rec1.__dict__)\n#rec1.book()\n#rec1.search_room()\n#print(Room.Room.room_list[0].__dict__)\n#print(Room.Room.room_list)\n#rec1.checkOut()\n#print(Room.Room.room_list)\n#rec1.menu()\n#print(Room.Room.room_list)\n#print(Visitor.bintang.__dict__)\n#rec1.menu()\n#print(rec1.room_list[0].__dict__)\n#rec1.checkOut()\n#print(rec1.room_list[0].__dict__)\n#rec1.search_room()\n#print(rec1.room_list)\n#cas1 = Cashier(\"bradley\", \"london\", \"L\", \"NYC\")\n#cas1.receipt()","repo_name":"bintangedma/PBO-Implementasi-Class-Diagram","sub_path":"Class/Employee.py","file_name":"Employee.py","file_ext":"py","file_size_in_byte":11340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"12378269876","text":"\"\"\"\n\nPractice Projects\nFor practice, write programs to do the following tasks.\n\nComma Code\nSay you have a list value like this:\n\n\nspam = ['apples', 'bananas', 'tofu', 'cats']\nWrite a function that takes a list value as an argument and returns a string with all the items separated by a comma and a space, with and inserted before the last item. For example, passing the previous spam list to the function would return 'apples, bananas, tofu, and cats'. 
But your function should be able to work with any list value passed to it.\n\n\"\"\"\n\n\nspam = ['apples', 'bananas', 'tofu', 'cats']\n\ndef Comma_Sep(l):\n    Sep_List = \"\"\n    for char in range(len(l)):\n        if char == len(l) - 1:\n            Sep_List = Sep_List + \" and \" + l[char]\n        elif char == 0:\n            Sep_List = Sep_List + l[char]\n        else:\n            Sep_List = Sep_List + \", \" + l[char]\n    return Sep_List\nprint(Comma_Sep(spam))\n","repo_name":"ROF618/Auto_The_Boring","sub_path":"ch.4.pracProj.py","file_name":"ch.4.pracProj.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"31659980058","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom django.views.generic import TemplateView\n\nurlpatterns = [\n    path(\"\", TemplateView.as_view(template_name=\"index.html\")),\n    path(\n        \"cookies/\",\n        include(\"gdpr_cookie_consent.urls\", namespace=\"cookie_consent\"),\n    ),\n    path(\"admin/\", admin.site.urls),\n]\n","repo_name":"archatas/django-gdpr-cookie-consent-demo-project","sub_path":"demo_project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"4"}
+{"seq_id":"3382952985","text":"import unittest\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport copy\nimport pandas as pd\nimport numpy as np\nimport shapely\nfrom shapely import wkt\nimport geopandas as gpd\nfrom shapely.geometry import mapping\nimport math\nfrom math import cos,sin\nfrom shapely.geometry import Polygon\n\ndef grid_create(gdf,lon_incre,lat_incre):\n\n    lon_min = gdf.geometry.x.min()\n    lat_min = gdf.geometry.y.min()\n    gdf['lon_id'] = gdf.apply(lambda r:int((r.geometry.x-lon_min)/lon_incre),axis=1)\n    gdf['lat_id'] = gdf.apply(lambda r:int((r.geometry.y-lat_min)/lat_incre),axis=1)\n    log_num = len(gdf['lon_id'].unique())\n    lat_num = len(gdf['lat_id'].unique())\n    geometry = []\n    \n    for i in range(len(gdf['lon_id'].unique())):\n        for j in range(len(gdf['lat_id'].unique())):\n            geometry.append(Polygon([\n                (lon_min+i*lon_incre,lat_min+j*lat_incre),\n                (lon_min+(i+1)*lon_incre,lat_min+j*lat_incre),\n                (lon_min+(i+1)*lon_incre,lat_min+(j+1)*lat_incre),\n                (lon_min+i*lon_incre,lat_min+(j+1)*lat_incre)]))\n    \n    created_grid = gpd.GeoDataFrame()\n    created_grid['geometry'] = geometry\n\n    grip_map = created_grid[created_grid.intersects(gdf.unary_union)]\n    \n    return created_grid,grip_map,gdf\n\ndef grid(gdf):\n    \n    gdf['grid'] = gdf.apply(lambda x:(x['lon_id'],x['lat_id']),axis=1)\n    gdf = gdf.sort_values(by = ['lon_id','lat_id'])\n    grid = [0]*len(gdf['grid'].unique())\n\n    for grids in gdf['grid'].unique():\n        grid_data = gdf[gdf['grid'].isin([grids])]\n        grid.append(grid_data) \n    grid = list(filter(lambda x: not isinstance(x, int), grid))\n    \n    return grid\n\nclass TestGrid(unittest.TestCase):\n    def test_grid(self):\n        \n        points = gpd.GeoDataFrame({\n            'geometry': [shapely.geometry.Point([0, 0]),\n                         shapely.geometry.Point([0.5, 1]),\n                         shapely.geometry.Point([1, 0]),\n                         shapely.geometry.Point([1.5, 0.5])]\n        })\n        points_new = grid_create(points,1,1)[2]\n        \n        test0 = gpd.GeoDataFrame({\n            'geometry': [shapely.geometry.Point([0, 0])],\n            'lon_id': [0],\n            'lat_id': [0],\n            'grid':[(0,0)]\n        })\n        test2 = gpd.GeoDataFrame({\n            'geometry': [shapely.geometry.Point([1, 0]),\n                         shapely.geometry.Point([1.5, 0.5])],\n            'lon_id': [1,1],\n            'lat_id': [0,0],\n            'grid':[(1,0),(1,0)]\n        })\n        \n        pd.testing.assert_frame_equal(grid(points_new)[0].reset_index(drop=True), test0)\n        pd.testing.assert_frame_equal(grid(points_new)[2].reset_index(drop=True), test2)\n        \n\nunittest.main()","repo_name":"Jingyi-hu/reportdetails","sub_path":"tests/test_grid.py","file_name":"test_grid.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"32825177834","text":"##You need three python modules are re,requests,BeautifulSoup4.\n##Run the paogram and input your gene name split by space,that you can get the function in order.\n##It write by vision python 3.9.\n\n\nimport re\nimport requests\nimport time\nimport random\nfrom bs4 import BeautifulSoup\nkv = {\"user-agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \\\r\n     (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36\"}\ngenelist = input(\"输入查找的全部基因,空格分隔:\")\ngenelist = genelist.strip().split()\nfor gene in genelist:\n    url = \"https://www.genecards.org/cgi-bin/carddisp.pl?gene=\"+ gene\n    r = requests.get(url, headers=kv)\n    time.sleep(random.randint(1,2))\n    demo = r.text\n    soup = BeautifulSoup(demo, \"html.parser\")\n    sa = soup.find_all(\"a\", {\"data-ga-action\": \"GWA\"})\n\n    list1 = []\n    list2 = []\n    list3 = []\n    list4 = []\n    for m in sa:\n        list1.append(str(m))\n    for n in list1:\n        if 'title=' not in n:\n            list2.append(str(n))\n    for i in list2[1:]:\n        re1 = r'data-ga-source-accession=(.*?)href'\n        func = re.findall(re1, i)\n        list3.append(func)\n    for a in list3:\n        b = str(a).replace('\"', '').replace(\"'\", \"\")\n        list4.append(b)\n    print(gene)\n    print([str(list4).replace(\"[\", \"\").replace(\"]\", \"\").replace(\"'\", \"\")])\n","repo_name":"wpf95/the-function-of-gene-in-genecards","sub_path":"genecards_func.py","file_name":"genecards_func.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"6915097834","text":"#Funções Globais quq a LogoVM usa\r\n\r\ntry:\r\n    from PIL import Image\r\nexcept ImportError:\r\n    HAS_PIL_IMAGE = False\r\nelse:\r\n    HAS_PIL_IMAGE = True\r\n\r\n\r\n__turtle_default = (0, 0)\r\n__turtle = (0, 0)\r\n__window = None\r\n\r\nflags = 0\r\nreg = [None] * 6\r\nstack = []\r\npc_stack = []\r\nimage_format = \"PNG\"\r\n\r\nMAXSTACKSIZE = 256 * (2**20)\r\n\r\nclass Flags:\r\n    \"\"\"Flag mappings.\"\"\"\r\n\r\n    PEN = 1\r\n    DRAW = 2\r\n    ERASE = 3\r\n    _UNUSED = 4\r\n    EXC = 5\r\n    VERR = 15\r\n    MAXFLAG = VERR\r\n\r\n\r\ndef __get_flag_index(flag):\r\n    \"\"\"Ensure flag index is valid.\"\"\"\r\n    flag = int(flag)\r\n    if not 1 <= flag <= Flags.MAXFLAG:\r\n        raise ValueError(\"Flag index must be in [1,{Flags.MAXFLAG}]\")\r\n    return flag\r\n\r\n\r\ndef set_flag(flag):\r\n    \"\"\"Set a flag.\"\"\"\r\n    global flags\r\n    logging.debug(\"BSET: %s: 0b%s\", flag, f\"{flags:0{Flags.MAXFLAG+1}b}\")\r\n    flag = __get_flag_index(flag)\r\n    flags |= 1 << flag\r\n    logging.debug(\"SET: %s: 0b%s\", flag, f\"{flags:0{Flags.MAXFLAG+1}b}\")\r\n\r\n\r\ndef unset_flag(flag):\r\n    \"\"\"Unset a flag.\"\"\"\r\n    global flags\r\n    logging.debug(\"BUNSET: %s: 0b%s\", flag, f\"{flags:0{Flags.MAXFLAG+1}b}\")\r\n    flag = __get_flag_index(flag)\r\n    flags &= ~(1 << flag)\r\n    logging.debug(\"UNSET: %s: 0b%s\", flag, f\"{flags:0{Flags.MAXFLAG+1}b}\")\r\n\r\n\r\ndef isset(flag):\r\n    \"\"\"Check if a flag is set.\"\"\"\r\n    logging.debug(\"ISSET: %d %s\", flag, f\"{flags:0{Flags.MAXFLAG+1}b}\")\r\n    return bool(flags & (1 << __get_flag_index(flag)))\r\n\r\n\r\ndef 
__init_video(width, height):\r\n \"\"\"Initialize video memory.\"\"\"\r\n global __window\r\n bpp = 1\r\n vidmem = [0] * (height * width * bpp)\r\n logging.debug(\r\n \"width=%d height=%d bpp=%d stride=%d\",\r\n width,\r\n height,\r\n bpp,\r\n width * bpp,\r\n )\r\n __window = [width, height, bpp, width * bpp, vidmem]\r\n\r\n\r\ndef reset_video():\r\n \"\"\"Resetar o video da memoria.\"\"\"\r\n __init_video(__window[0], __window[1])\r\n unset_flag(Flags.DRAW)\r\n\r\n\r\ndef __save_video(filename):\r\n \"\"\"Salvar memória de vídeo em um nome de arquivo filename.\"\"\"\r\n logging.debug(\"save_video: %s\", filename)\r\n if isset(Flags.VERR):\r\n logging.warning(\"A video error occured.\")\r\n if isset(Flags.DRAW):\r\n if HAS_PIL_IMAGE and image_format.lower() in [\"jpg\", \"png\"]:\r\n __save_as_PIL(filename)\r\n elif image_format.lower() in [\"ppm\", \"pnm\", \"pgm\", \"netpbm\", \"pbm\"]:\r\n __save_as_PPM(filename)\r\n\r\n\r\ndef __save_as_PIL(filename):\r\n logging.debug(\"Saving with PIL: %s %s\", filename, image_format)\r\n logging.debug(\"WINDOW: %s\", repr(__window))\r\n width, height, bpp, _stride, data = __window\r\n data = bytes(data)\r\n mode = \"L\" if bpp == 1 else \"RGB\"\r\n img = Image.frombytes(mode, (width, height), data)\r\n img.save(f\"{filename}.{image_format.lower()}\")\r\n\r\n\r\ndef __save_as_PPM(filename):\r\n logging.debug(\"Saving PNM: %s\", filename)\r\n logging.debug(\"WINDOW: %s\", repr(__window))\r\n width, height, bpp, stride, data = __window\r\n mode = \"P2\" if bpp == 1 else \"P3\"\r\n ext = \"pgm\" if bpp == 1 else \"ppm\"\r\n with open(f\"{filename}.{ext}\", \"w\", encoding=\"ascii\") as out:\r\n print(mode, file=out)\r\n print(f\"# {filename}.{ext}\", file=out)\r\n print(f\"{width} {height}\", file=out)\r\n print(\"255\", file=out)\r\n if isset(Flags.VERR):\r\n print(\"# WARNING: A video error occured.\", file=out)\r\n for j in range(height):\r\n start = stride * j\r\n print(\r\n \" \".join(str(v) for v in data[start : start + stride]),\r\n file=out,\r\n )\r\n print(\"\".join(f\"{v: 4d}\" for v in data[start : start + stride]))\r\n\r\n\r\ndef __plot(x, y, color=255):\r\n \"\"\"Defina um pixel com a cor dada.\"\"\"\r\n width, height, bpp, stride, vidmem = __window\r\n logging.debug(\"params: x=%s y=%s width=%s height=%s\", x, y, width, height)\r\n if not 0 <= x < width:\r\n return\r\n if not 0 <= y < height:\r\n return\r\n if isinstance(color, (int, float)):\r\n color = [int(color)]\r\n else:\r\n color = [int(v) for v in color]\r\n if len(color) != bpp:\r\n set_flag(Flags.VERR)\r\n if bpp == 1:\r\n color = sum(color, 0) // len(color)\r\n else:\r\n color = color * 3\r\n color = color[0] if bpp == 1 else color\r\n x = round(x)\r\n y = round(y)\r\n position = stride * y + x * bpp\r\n vidmem[position] = color\r\n logging.debug(\"vidmem:x=%s y=%s c=%s pos=%s\", x, y, color, position)\r\n set_flag(Flags.DRAW)\r\n\r\n\r\ndef set_pixel():\r\n \"\"\"Implementar a instrucao: SETPX.\"\"\"\r\n if isset(Flags.PEN):\r\n __plot(*__turtle[:2], 0 if isset(Flags.ERASE) else 255)\r\n\r\n\r\ndef draw_line():\r\n \"\"\"Desenhe um segmento de linha na memória de vídeo da posição atual até o alvo.\"\"\"\r\n x0, y0, x1, y1 = reg[:6]\r\n logging.debug(\"x0=%d y0=%d x1=%d y1=%d\", x0, y0, x1, y1)\r\n if isset(Flags.PEN):\r\n dx = abs(x1 - x0)\r\n sx = copysign(1, x1 - x0)\r\n dy = -abs(y1 - y0)\r\n sy = copysign(1, y1 - y0)\r\n error = dx + dy\r\n\r\n while True:\r\n __plot(x0, y0)\r\n if round(x0) == round(x1) and round(y0) == round(y1):\r\n break\r\n error2 = 2 * error\r\n 
logging.debug(\"dx %s dy %s error %s e2 %s\", dx, dy, error, error2)\r\n if error2 >= dy:\r\n if round(x0) == round(x1):\r\n break\r\n error = error + dy\r\n x0 = x0 + sx\r\n if error2 <= dx:\r\n if round(y0) == round(y1):\r\n break\r\n error = error + dx\r\n y0 = y0 + sy\r\n\r\n stack_push(x1, y1)\r\n set_pos()\r\n\r\n\r\ndef get_pos():\r\n \"\"\"Obtenha a posição do ponteiro como (R0, R1).\"\"\"\r\n logging.debug(\"TURTLE: %s\", repr(__turtle))\r\n reg[0] = __turtle[0]\r\n reg[1] = __turtle[1]\r\n return (reg[0], reg[1])\r\n\r\n\r\ndef set_pos():\r\n \"\"\"Defina a posição do ponteiro para (R0, R1).\"\"\"\r\n global __turtle\r\n _, _, *extra = __turtle\r\n reg[1] = stack_pop()\r\n reg[0] = stack_pop()\r\n __turtle = (reg[0], reg[1], *extra)\r\n logging.debug(\"TURTLE: %s\", repr(__turtle))\r\n\r\ndef stack_peek():\r\n \"\"\"Peek valor no topo da pilha.\"\"\"\r\n return stack[-1] if stack else None\r\n\r\n\r\ndef stack_pop():\r\n \"\"\"Retirar um valor da pilha.\"\"\"\r\n if not stack:\r\n raise EmptyStackError()\r\n value = stack.pop()\r\n logging.debug(\"STACK: %s\", repr(stack))\r\n return value\r\n\r\n\r\ndef stack_push(*args):\r\n \"\"\"Empurre um valor para a pilha.\"\"\"\r\n for value in args:\r\n if len(stack) < MAXSTACKSIZE:\r\n stack.append(value)\r\n else:\r\n raise StackOverflowError()\r\n logging.debug(\"STACK: %s\", repr(stack))\r\n\r\n\r\ndef halt():\r\n \"\"\"Delisgamento da maquina.\"\"\"\r\n filename = datetime.now().strftime(\"%Y%m%d-%H%M%S.%s\")\r\n logging.debug(\"HALT: %s %s\", filename, image_format)\r\n pc_stack.clear()\r\n __save_video(filename)\r\n\r\n\r\ndef init(**kwargs):\r\n\r\n global __turtle\r\n global __turtle_default\r\n width, height = kwargs.get(\"width\", 456), kwargs.get(\"height\", 182)\r\n x, y = kwargs.get(\"x\", width // 2), kwargs.get(\"y\", height // 2)\r\n __turtle_default = (x, y)\r\n __turtle = (x, y)\r\n __init_video(width, height)\r\n unset_flag(Flags.DRAW)","repo_name":"Luh022/Trabalho-Final-Comp","sub_path":"maquinario.py","file_name":"maquinario.py","file_ext":"py","file_size_in_byte":7256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"71952164596","text":"__all__ = [\n 'body_decode',\n 'body_encode',\n 'decode',\n 'decodestring',\n 'header_encode',\n 'header_length',\n ]\n\n\nfrom base64 import b64encode\nfrom binascii import b2a_base64, a2b_base64\n\nCRLF = '\\r\\n'\nNL = '\\n'\nEMPTYSTRING = ''\n\n# See also Charset.py\nMISC_LEN = 7\n\n\n# Helpers\ndef header_length(bytearray):\n \"\"\"Return the length of s when it is encoded with base64.\"\"\"\n groups_of_3, leftover = divmod(len(bytearray), 3)\n # 4 bytes out for each 3 bytes (or nonzero fraction thereof) in.\n n = groups_of_3 * 4\n if leftover:\n n += 4\n return n\n\n\ndef header_encode(header_bytes, charset='iso-8859-1'):\n \"\"\"Encode a single header line with Base64 encoding in a given charset.\n\n charset names the character set to use to encode the header. It defaults\n to iso-8859-1. Base64 encoding is defined in RFC 2045.\n \"\"\"\n if not header_bytes:\n return \"\"\n if isinstance(header_bytes, str):\n header_bytes = header_bytes.encode(charset)\n encoded = b64encode(header_bytes).decode(\"ascii\")\n return '=?%s?b?%s?=' % (charset, encoded)\n\n\ndef body_encode(s, maxlinelen=76, eol=NL):\n r\"\"\"Encode a string with base64.\n\n Each line will be wrapped at, at most, maxlinelen characters (defaults to\n 76 characters).\n\n Each line of encoded text will end with eol, which defaults to \"\\n\". 
Set\n this to \"\\r\\n\" if you will be using the result of this function directly\n in an email.\n \"\"\"\n if not s:\n return \"\"\n\n encvec = []\n max_unencoded = maxlinelen * 3 // 4\n for i in range(0, len(s), max_unencoded):\n # BAW: should encode() inherit b2a_base64()'s dubious behavior in\n # adding a newline to the encoded string?\n enc = b2a_base64(s[i:i + max_unencoded]).decode(\"ascii\")\n if enc.endswith(NL) and eol != NL:\n enc = enc[:-1] + eol\n encvec.append(enc)\n return EMPTYSTRING.join(encvec)\n\n\ndef decode(string):\n \"\"\"Decode a raw base64 string, returning a bytes object.\n\n This function does not parse a full MIME header value encoded with\n base64 (like =?iso-8859-1?b?bmloISBuaWgh?=) -- please use the high\n level email.header class for that functionality.\n \"\"\"\n if not string:\n return bytes()\n elif isinstance(string, str):\n return a2b_base64(string.encode('raw-unicode-escape'))\n else:\n return a2b_base64(string)\n\n\n# For convenience and backwards compatibility w/ standard base64 module\nbody_decode = decode\ndecodestring = decode\n","repo_name":"python/cpython","sub_path":"Lib/email/base64mime.py","file_name":"base64mime.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","stars":56926,"dataset":"github-code","pt":"4"} +{"seq_id":"6321353054","text":"import numpy as np\nimport numpy.ma as ma\nimport time, traceback, logging, struct, sys\nfrom messageParser import mParse\nIO_DType = np.dtype( np.float32 ).newbyteorder('>')\nfrom abc import ABCMeta, abstractmethod\n\ndef getFillValue( array ):\n try: return array.get_fill_value()\n except: return sys.float_info.max\n\nclass CDArray:\n __metaclass__ = ABCMeta\n\n def __init__(self, _id, _origin, _shape, _metadata ):\n self.logger = logging.getLogger(\"worker\")\n self.id = _id\n self.origin = _origin\n self.shape = _shape\n self.metadata = _metadata\n self.roi = self.parseRoi()\n self.logger.debug(\"Created Array: {0}\".format(self.id))\n self.logger.debug(\" >> Array Metadata: {0}\".format(self.metadata))\n self.logger.debug(\" >> Array Shape: [{0}]\".format(', '.join(map(str, self.shape))))\n self.logger.debug(\" >> Array Origin: [{0}]\".format(', '.join(map(str, self.origin))))\n self.logger.debug(\" >> Array ROI: [{0}]\".format(', '.join(map(str, self.roi.items()))))\n\n @classmethod\n @abstractmethod\n def createResult(cls, task, input, result_array ): raise Exception( \"Executing abstract method createResult in CDArray\")\n\n def uid(self): return self.id.split('-')[0]\n\n def getGridBounds(self):\n gridBnds = self.metadata.get(\"gridbnds\")\n if ( gridBnds is None ): return None\n else:\n bndVals = [ float(grdBnd) for grdBnd in gridBnds.split(\",\") ]\n return ( (bndVals[0],bndVals[1]), (bndVals[2],bndVals[3]) )\n\n\n @classmethod\n @abstractmethod\n def createInput(cls, header, data): raise Exception( \"Executing abstract method createInput in CDArray\")\n\n @abstractmethod\n def getVariable( self, gridFile = None ): pass\n\n @abstractmethod\n def array(self): pass\n\n @abstractmethod\n def getGrid(self): pass\n\n @abstractmethod\n def subsetAxes(self, dimensions, gridfile, origin, shape ): pass\n\n @abstractmethod\n def toBytes( self, dtype ): pass\n\n def getAxisSection( self, axis ): return None if self.roi == None else self.roi.get( axis.lower(), None )\n\n def parseRoi(self):\n roiSpec = self.metadata.get(\"roi\")\n roiMap = {}\n if( roiSpec != None ):\n self.logger.info(\" ***->> Parsing ROI spec: {0}\".format( roiSpec ) )\n for roiTok in 
roiSpec.split('+'):\n axisToks = roiTok.split(',')\n roiMap[ axisToks[0].lower() ] = ( int(axisToks[1]), int(axisToks[2]) + 1 )\n return roiMap\n\nclass npArray(CDArray):\n\n @classmethod\n def createResult(cls, task, input, result_array ):\n return npArray( task.rId, input.origin, result_array.shape, dict( input.metadata, **task.metadata ), result_array, input.undef )\n\n @classmethod\n def createAuxResult( cls, id, metadata, input, result_array ):\n return npArray( id, input.origin, result_array.shape, metadata, result_array, input.undef )\n\n def toBytes( self, dtype ):\n return self.array.astype(dtype).tobytes() + np.array([self.undef]).astype(dtype).tobytes() # bytearray(struct.pack(\"f\", self.undef))\n\n @classmethod\n def createInput(self, header, data):\n logger = logging.getLogger(\"worker\")\n logger.info(\" ***->> Creating Input, header = {0}\".format( header ) )\n header_toks = header.split('|')\n id = header_toks[1]\n origin = mParse.s2it(header_toks[2])\n shape = mParse.s2it(header_toks[3])\n metadata = mParse.s2m(header_toks[4])\n if data:\n logger.info(\" *** Creating Input, id = {0}, data size = {1}, shape = {2}\".format( id, len(data), str(shape) ) )\n raw_data = np.frombuffer( data, dtype=IO_DType ).astype(np.float32)\n undef_value = raw_data[-1]\n logger.info(\" *** buffer len = {0}, undef = {1}\".format( str(len(raw_data)), str(undef_value) ) )\n data_array = ma.masked_invalid( raw_data[0:-1].reshape(shape) )\n nparray = ma.masked_equal(data_array,undef_value) if ( undef_value != 1.0 ) else data_array\n else:\n nparray = None\n undef_value = float('inf')\n return npArray( id, origin, shape, metadata, nparray, undef_value )\n\n def __init__(self, _id, _origin, _shape, _metadata, _ndarray, _undef ):\n super(npArray, self).__init__(_id,_origin,_shape,_metadata)\n self.gridFile = self.metadata[\"gridfile\"]\n self.name = self.metadata.get(\"name\",\"\")\n self.collection = self.metadata.get(\"collection\",\"\")\n self.dimensions = self.metadata.get(\"dimensions\",\"\").split(\",\")\n self.array = _ndarray\n self.undef = _undef\n self.variable = None\n self.logger.info(\" *** Creating NP data array, nbytes = \" + str(self.nbytes()) + \", undef = \" + str(self.undef) )\n\n def getSelector(self, variable, **args):\n kargs = {}\n for idim in range( variable.rank() ):\n axis = variable.getAxis(idim)\n start = self.origin[idim]\n end = start + self.shape[idim]\n interval = [ start, end ]\n kargs[axis.id] = slice(*interval)\n return kargs\n\n def nbytes(self): return self.array.nbytes if (self.array != None) else 0\n def array(self): return self.array\n\n def getGrid1(self):\n import cdms2\n gridfile = cdms2.open(self.gridFile)\n baseGrid = gridfile.grids.values()[0]\n gridBnds = self.getGridBounds()\n if ( gridBnds is None ): return baseGrid\n else:\n variable = self.getVariable()\n (lataxis, lonaxis) = (variable.getLatitude(), variable.getLongitude())\n (latInterval, lonInterval) = (lataxis.mapInterval( gridBnds[0] ), lonaxis.mapInterval( gridBnds[1] ))\n return baseGrid.subGrid( latInterval, lonInterval )\n\n def getGrid(self):\n import cdms2\n gridfile = cdms2.open(self.gridFile)\n baseGrid = gridfile.grids.values()[0]\n (latInterval, lonInterval) = ( self.getAxisSection('y'), self.getAxisSection('x') )\n if ( (latInterval == None) or (lonInterval == None) ): return baseGrid\n else: return baseGrid.subGrid( latInterval, lonInterval )\n\n def getVariable( self, gridFilePath = None ):\n import cdms2\n if( self.variable is None ):\n t0 = time.time()\n gridfile = cdms2.open( 
self.gridFile if (gridFilePath==None) else gridFilePath )\n var = gridfile[self.name]\n grid = gridfile.grids.values()[0]\n partition_axes = self.subsetAxes(self.dimensions, gridfile, self.origin, self.shape)\n self.variable = cdms2.createVariable(self.array, typecode=None, copy=0, savespace=0, mask=None, fill_value=var.getMissing(),\n grid=grid, axes=partition_axes, attributes=self.metadata, id=self.collection + \"-\" + self.name)\n self.variable.createattribute(\"gridfile\", self.gridFile)\n self.variable.createattribute(\"origin\", mParse.ia2s(self.origin))\n t1 = time.time()\n self.logger.info(\" >> Created CDMS Variable: {0} ({1}) in time {2}, gridFile = {3}\".format(self.variable.id, self.name, (t1 - t0), self.gridFile))\n return self.variable\n\n def subsetAxes( self, dimensions, gridfile, origin, shape ):\n subAxes = []\n try:\n for index in range( len(dimensions) ):\n start = origin[index]\n length = shape[index]\n dim = dimensions[index]\n axis = gridfile.axes.get(dim)\n subAxes.append( axis.subAxis( start, start + length ) )\n self.logger.info( \" >> Axis: {0}, length: {1} \".format( dim, length ) )\n except Exception as err:\n self.logger.info( \"\\n-------------------------------\\nError subsetting Axes: {0}\\n{1}-------------------------------\\n\".format(err, traceback.format_exc() ) )\n raise err\n return subAxes\n\n\nclass cdmsArray(CDArray):\n\n @classmethod\n def createResult(cls, task, input, cdVariable ):\n return cdmsArray( task.rId, input.origin, cdVariable.shape, dict( input.metadata, **task.metadata ), cdVariable )\n\n @classmethod\n def getName(cls, variable ):\n try: return variable.name_in_file;\n except: return variable.id;\n\n\n @classmethod\n def createInput( cls, cdVariable ):\n id = cdVariable.id\n origin = cdVariable.attributes.get(\"origin\")\n shape = cdVariable.shape\n metadata = cdVariable.attributes\n return cdmsArray( id, origin, shape, metadata, cdVariable )\n\n def array(self):\n return self.variable.data\n\n def toBytes( self, dtype ):\n return self.variable.data.astype(dtype).tobytes() + np.array([ self.variable.getMissing() ]).astype(dtype).tobytes() # bytearray( struct.pack(\"f\", self.variable.getMissing()))\n\n def __init__(self, _id, _origin, _shape, _metadata, cdVariable ):\n super(cdmsArray, self).__init__(_id,_origin,_shape,_metadata)\n self.logger.info(\" *** Creating input cdms array, size = \" + str( cdVariable.size ) )\n self.name = cdmsArray.getName(cdVariable)\n self.grid = cdVariable.getGrid()\n self.dimensions = self.metadata[\"dimensions\"].split(\",\")\n self.variable = cdVariable\n\n def getVariable( self, gridFile= None ): return self.variable\n\n def getGrid(self):\n baseGrid = self.variable.getGrid()\n (latInterval, lonInterval) = ( self.getAxisSection('y'), self.getAxisSection('x') )\n if ( (latInterval == None) or (lonInterval == None) ): return baseGrid\n else: return baseGrid.subGrid( latInterval, lonInterval )\n\n def getGrid1(self):\n gridBnds = self.getGridBounds()\n if ( gridBnds == None ): return self.variable.getGrid()\n else:\n (lataxis, lonaxis) = (self.variable.getLatitude(), self.variable.getLongitude())\n (latInterval, lonInterval) = (lataxis.mapInterval( gridBnds[0] ), lonaxis.mapInterval( gridBnds[1] ))\n self.logger.info( \" latInterval {0} --- lonInterval {1} \".format( str(latInterval), str(lonInterval) ) )\n return self.variable.getGrid().subGrid( latInterval, lonInterval )\n\n def subsetAxes( self, dimensions, gridfile, origin, shape ):\n subAxes = []\n try:\n for index in range( len(dimensions) ):\n 
start = origin[index]\n length = shape[index]\n dim = dimensions[index]\n axis = gridfile.axes.get(dim)\n subAxes.append( axis.subAxis( start, start + length ) )\n self.logger.info( \" >> Axis: {0}, length: {1} \".format( dim, length ) )\n except Exception as err:\n self.logger.info( \"\\n-------------------------------\\nError subsetting Axes: {0}\\n{1}-------------------------------\\n\".format(err, traceback.format_exc() ) )\n raise err\n return subAxes\n\n\n","repo_name":"nasa-nccs-cds/CDAS2","sub_path":"python/src/pycdas/cdasArray.py","file_name":"cdasArray.py","file_ext":"py","file_size_in_byte":10927,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"4"} +{"seq_id":"35726843111","text":"\nfrom chem_ppo_parallel import main\nimport argparse, sys\nimport itertools\n\nextrinsic_rewards=[True]\ncuriosity_weights=[0, 0.005, 0.01, 0.02, 0.04] \nuse_previous_molecule_distance_rewards=[False]\nscoring_fncs=['SIMILARITY']\naction_modes=['APPEND']\ndiscount_factors=[1.0] \nk_epochss=[4] \nenv_agent_share_encoders=[False]\nl2_curiositys=[False] \nwinner_curiositys=[False] \nevery_time_step_feedbacks=[True]\njob_ids=[2,3]\ncuriosity_buffer_sizes=[20000]\nentropy_weights=[0.01, 0.02, 0.03, 0.04, 0.05]\nfingerprint_radiuses = [2]\nfingerprint_bitss = [256]\nlsh_bitss = [16, 32, 64, 128]\n\n\nhyperparameter_dict_list = []\n\n\nfor hyperparameter in list(itertools.product(extrinsic_rewards, curiosity_weights, \n use_previous_molecule_distance_rewards, scoring_fncs, action_modes, \n discount_factors, k_epochss, env_agent_share_encoders, \n l2_curiositys, winner_curiositys, every_time_step_feedbacks, curiosity_buffer_sizes, job_ids, entropy_weights, fingerprint_radiuses, fingerprint_bitss, lsh_bitss)):\n extrinsic_reward, curiosity_weight, use_previous_molecule_distance_reward, scoring_fnc, action_mode, discount_factor, k_epochs, env_agent_share_encoder, l2_curiosity, winner_curiosity, every_time_step_feedback, curiosity_buffer_size, job_id, entropy_weight, fingerprint_radius, fingerprint_bits, lsh_bits = hyperparameter\n\n if curiosity_weight == 0 and lsh_bits > 16 or curiosity_weight > 0 and entropy_weight not in [0.01, 0.02]:\n continue\n\n hyperparameter_dict = {'extrinsic_reward':extrinsic_reward, 'intrinsic_reward_weight':curiosity_weight,'use_previous_molecule_distance_reward':use_previous_molecule_distance_reward,\n 'scoring_fnc':scoring_fnc, 'action_mode':action_mode, 'discount_factor':discount_factor, 'k_epochs':k_epochs, 'env_agent_share_encoder':env_agent_share_encoder,\n 'l2_curiosity':l2_curiosity, 'winner_curiosity':winner_curiosity, 'every_time_step_feedback':every_time_step_feedback, 'job_id':job_id, \n 'curiosity_buffer_size':curiosity_buffer_size, 'entropy_weight':entropy_weight, 'fingerprint_radius':fingerprint_radius, 'fingerprint_bits':fingerprint_bits,\n 'lsh_bits': lsh_bits}\n\n hyperparameter_dict_list.append(hyperparameter_dict)\n\n\n\nfor i, hyperparameter_dict in enumerate(hyperparameter_dict_list):\n hyperparameter_dict['i']=i\n hyperparameter_dict_list[i] = hyperparameter_dict\n\n\nprint('LENGTH: ', len(hyperparameter_dict_list))\n'''\nfor i in range(len(hyperparameter_dict_list)):\n print(hyperparameter_dict_list[i])\n'''\n\ndef launch_main(i, results_directory):\n i = int(i)\n if i == len(hyperparameter_dict_list):\n i = 0\n\n job_id = hyperparameter_dict_list[i]['job_id']\n curiosity_weight = hyperparameter_dict_list[i]['intrinsic_reward_weight']\n entropy_weight= hyperparameter_dict_list[i]['entropy_weight']\n do_plot = 
False\n pca = 0\n extrinsic_reward = hyperparameter_dict_list[i]['extrinsic_reward']\n use_previous_molecule_distance_reward = hyperparameter_dict_list[i]['use_previous_molecule_distance_reward']\n scoring_fnc = hyperparameter_dict_list[i]['scoring_fnc']\n if scoring_fnc == 'SIMILARITY':\n max_string_length = 60\n else:\n max_string_length = 35\n action_mode = hyperparameter_dict_list[i]['action_mode']\n discount_factor = hyperparameter_dict_list[i]['discount_factor']\n k_epochs = hyperparameter_dict_list[i]['k_epochs']\n env_agent_share_encoder = hyperparameter_dict_list[i]['env_agent_share_encoder']\n num_episodes = 1000\n l2_curiosity = hyperparameter_dict_list[i]['l2_curiosity']\n winner_curiosity = hyperparameter_dict_list[i]['winner_curiosity']\n every_time_step_feedback = hyperparameter_dict_list[i]['every_time_step_feedback']\n curiosity_buffer_size = hyperparameter_dict_list[i]['curiosity_buffer_size']\n fingerprint_bits = hyperparameter_dict_list[i]['fingerprint_bits']\n fingerprint_radius = hyperparameter_dict_list[i]['fingerprint_radius']\n lsh_bits = hyperparameter_dict_list[i]['lsh_bits']\n\n\n print('job_id ', job_id, '\\n',\n 'intrinsic_reward_weight ', curiosity_weight, '\\n',\n 'do_plot ', do_plot, '\\n',\n 'pca ', pca, '\\n',\n 'extrinsic_reward ', extrinsic_reward, '\\n',\n 'use_previous_molecule_distance_reward ', use_previous_molecule_distance_reward, '\\n',\n 'scoring_fnc ', scoring_fnc, '\\n',\n 'max_string_length ', max_string_length, '\\n',\n 'action_mode ', action_mode, '\\n',\n 'discount_factor ', discount_factor, '\\n', \n 'k_epochs ', k_epochs, '\\n',\n 'env_agent_share_encoder ', env_agent_share_encoder, '\\n',\n 'num_episodes ', num_episodes, '\\n',\n 'l2_curiosity ', l2_curiosity, '\\n',\n 'winner_curiosity ', winner_curiosity, '\\n',\n 'every_time_step_feedback ', every_time_step_feedback, '\\n',\n 'curiosity_buffer_size ', curiosity_buffer_size, '\\n',\n 'fingerprint_bits ', fingerprint_bits, '\\n',\n 'fingerprint_radius ', fingerprint_radius, '\\n',\n 'entropy_weight ', entropy_weight, '\\n',\n 'lsh_bits ', lsh_bits)\n print(hyperparameter_dict_list[i])\n\n\n n_epochs_pred_network = 10000\n\n main(job_id, curiosity_weight, do_plot, pca, extrinsic_reward, use_previous_molecule_distance_reward,\n scoring_fnc, max_string_length, action_mode, discount_factor, k_epochs, env_agent_share_encoder, num_episodes,\n l2_curiosity, winner_curiosity, device='cuda:0', batch_size=64, every_time_step_feedback = every_time_step_feedback, load_previous_experiment=True, \n results_directory=results_directory, n_epochs_pred_network=n_epochs_pred_network, curiosity_buffer_size=curiosity_buffer_size, entropy_weight=entropy_weight, \n fingerprint_radius = fingerprint_radius, fingerprint_bits=fingerprint_bits, lsh_bits = lsh_bits)\n\n\n\n\n\nif __name__ == '__main__':\n parser=argparse.ArgumentParser()\n \n parser.add_argument('--id', default=0)\n parser.add_argument('--results_directory', default='.')\n args=parser.parse_args()\n job_id = int(args.id)\n results_directory = args.results_directory\n\n print('JOB ID: ', job_id)\n print('RESULTS DIRECTORY: ', results_directory)\n launch_main(job_id, results_directory)\n \n '''\n for i in range(1,len(hyperparameter_dict_list)):\n launch_main(i, '.')\n '''\n\n\n#sudo apt install 
openbabel","repo_name":"aspuru-guzik-group/curiosity","sub_path":"experiment_schedule.py","file_name":"experiment_schedule.py","file_ext":"py","file_size_in_byte":6498,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"4"} +{"seq_id":"10840375362","text":"\"\"\"\n@author:maohui\n@time:2022/7/5 13:10\n         ┏┓ ┏┓+ +\n        ┏┛┻━━━━┛┻┓ + +\n        ┃ ┃   \n        ┃ ━ ┃ ++ + + +\n        ████━████ ┃+\n        ┃ ┃ +\n        ┃ ┻ ┃\n        ┃ ┃ + +\n        ┗━┓ ┏━━┛\n         ┃ ┃\n         ┃ ┃ + + + +\n         ┃ ┃   Code is far away from bug with the animal protecting\n         ┃ ┃+     神兽保佑,代码无bug\n         ┃ ┃\n         ┃ ┃  +\n         ┃ ┗━━━━━━━┓ + + \n         ┃ ┣┓\n         ┃ ┏┛\n         ┗┓┓┏━━━━━┳┓┏┛ + + + +\n         ┃┫┫ ┃┫┫\n         ┗┻┛ ┗┻┛+ + + +\n\"\"\"\nimport datetime\n\nimport pytest\n\nfrom configs.config import TOKEN, HOST\nfrom tools.httpclient import HttpClient\nfrom tools.yamlControl import get_yaml_data1\n\n\nclass TestSampleMsg():\n\n @classmethod\n def setup_class(cls):\n cls.file_id=\"\"\n\n @classmethod\n def teardown_class(cls):\n pass\n\n #3.1增加样本\n @pytest.mark.skip(reason=\"自增字段,不能随意增加样本\")\n def test_add_sample(self):\n \"\"\"增加样本,暂时不测试,跳过\"\"\"\n pass\n #3.2按样本id修改样本信息\n def test_acd_sampleId_alter_sampleMsg(self):\n \"\"\"3.2按样本id修改样本信息\"\"\"\n request_data = {\n # \"sys_id\": 1,\n # \"type\": 1,\n # \"cid\": 1,\n # \"collect_time\": \"2020-12-16\",\n # \"channel\": \"\",\n # \"risk\": 2,\n # \"result\": \"好\",\n \"guidance\": f\"继续作{datetime.datetime.today()}\"\n }\n response = HttpClient().send_request(method='put', url=f'{HOST}/sample/1330', param_type='application/json',\n data=request_data, headers={\"Authorization\": f'Bearer<{TOKEN}>'})\n assert response.json()['success'] == True\n print(response.json())\n #3.3按样本id插入文件信息\n @pytest.mark.parametrize('data',get_yaml_data1('../data/Acd_sampleid_insert_file_msg.yaml'))\n def test_acd_sampleId_insert_filemsg(self,data):\n \"\"\"3.3按样本id插入文件信息\"\"\"\n request_data = {\n \"sys_id\":1,\n \"sid\":1330,\n \"files\":[{\"group\":1,\"type\":1,\"oss_id\":1,\"path\":\"1516-2ba8-4ba6-bdc9-7af7852b02ae.html\"}]\n }\n response = HttpClient().send_request(method=data['method'], url=f'{HOST}/{data[\"url\"]}',\n param_type='application/json',\n data=request_data, headers={\"Authorization\": f'Bearer<{TOKEN}>'})\n assert response.json()['success'] == data[\"success\"]\n global file_id\n file_id=response.json()[\"fid_map\"][0][\"id\"]\n print(response.json())\n #3.4按文件id删除文件\n @pytest.mark.parametrize('data', get_yaml_data1('../data/Acd_fileid_delete_file.yaml'))\n def test_acd_fileId_delete_file(self,data):\n \"\"\"3.4按文件id删除文件\"\"\"\n request_data = \"\"\n response = HttpClient().send_request(method=data['method'], url=f'{HOST}/{data[\"url\"]}/{file_id}',\n param_type='application/json',\n data=None, headers={\"Authorization\": f'Bearer<{TOKEN}>'})\n assert response.json()['success'] == data[\"success\"]\n print(response.json())\n #3.5按文件id插入(修改)项目代号\n @pytest.mark.parametrize('data', get_yaml_data1('../data/Acd_fileid_change_project.yaml'))\n def test_acd_fileId_change_project(self,data):\n \"\"\"3.5按文件id插入(修改)项目代号\"\"\"\n request_data = data[\"request\"]\n response = HttpClient().send_request(method=data['method'], url=f'{HOST}/{data[\"url\"]}',\n param_type='application/json',\n data=request_data, headers={\"Authorization\": f'Bearer<{TOKEN}>'})\n assert response.json()['success'] == data[\"success\"]\n print(response.json())\n #3.6按时间段查询样本\n @pytest.mark.parametrize('data', 
get_yaml_data1('../data/Acd_time_inquire_sample.yaml'))\n    def test_acd_time_inquire_sample(self,data):\n        \"\"\"3.6按时间段查询样本\"\"\"\n        # request_data = {\"begin_date\":data[\"request\"]['begin_date'],\n        #                 \"end_date\":data[\"request\"]['end_date']}\n        request_data =data[\"request\"]\n        response = HttpClient().send_request(method=data['method'], url=f'{HOST}/{data[\"url\"]}',\n                                             param_type='application/json',\n                                             data=request_data, headers={\"Authorization\": f'Bearer<{TOKEN}>'})\n        assert response.json()['success'] == data[\"success\"]\n        print(response.json())\n    #3.7按样本id查询样本\n    @pytest.mark.parametrize('data', get_yaml_data1('../data/Acd_sampleid_inquire_sample.yaml'))\n    def test_acd_sampleId_inquire_sample(self,data):\n        \"\"\"3.7按样本id查询样本\"\"\"\n        request_data = \"\"\n        response = HttpClient().send_request(method=data['method'], url=f'{HOST}/{data[\"url\"]}',\n                                             param_type='application/json',\n                                             data=request_data, headers={\"Authorization\": f'Bearer<{TOKEN}>'})\n        assert response.json()['success'] == data[\"success\"]\n        print(response.json())\n\nif __name__ == '__main__':\n    pytest.main(['test_sample_msg.py','-sv'])","repo_name":"sobermh/database_request_pytest","sub_path":"case/test_sample_msg.py","file_name":"test_sample_msg.py","file_ext":"py","file_size_in_byte":6014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"22434598157","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*- \n#\n# Post build step\n# Use:\n# post_build.py $(TargetDir) $(PlatformTarget) $(ConfigurationName)\nimport sys, os\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), \"../../../script\")))\nimport Rylogic as Tools\nimport UserVars\n\ntry:\n\tTools.AssertVersion(1)\n\n\tappname = \"constraint\";\n\ttargetdir = sys.argv[1].rstrip('\\\\') if len(sys.argv) > 1 else \"P:\\\\obj\\\\v140\\\\constraint\\\\x64\\\\Debug\"\n\tplatform = sys.argv[2] if len(sys.argv) > 2 else \"x64\"\n\tconfig = sys.argv[3] if len(sys.argv) > 3 else \"Debug\"\n\tif platform.lower() == \"win32\": platform = \"x86\"\n\n\t# Copy dependencies to 'targetdir'\n\tTools.Copy(Tools.Path(UserVars.root, f\"lib\\\\{platform}\\\\{config}\\\\view3d.dll\") , targetdir, only_if_modified=True)\n\tTools.Copy(Tools.Path(UserVars.root, f\"lib\\\\{platform}\\\\{config}\\\\scintilla.dll\"), targetdir, only_if_modified=True)\n\nexcept Exception as ex:\n\tTools.OnException(ex)\n","repo_name":"psryland/rylogic_code","sub_path":"projects/tests/gui/post_build.py","file_name":"post_build.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"}
+{"seq_id":"31571922974","text":"\"\"\"\noutput.py: The output thread for the miniature framework.\n\"\"\"\nimport threading\n\nidentity = lambda x: x\n\n\nclass OutThread(threading.Thread):\n    def __init__(self, N, q, sorting=True, *args, **kwargs):\n        \"\"\"Initialize thread and save queue reference.\"\"\"\n        threading.Thread.__init__(self, *args, **kwargs)\n        self.queue = q\n        self.workers = N\n        self.sorting = sorting\n        self.output = []\n\n    def run(self):\n        \"\"\"Extract items from the output queue and print all until all done.\"\"\"\n        while self.workers:\n            p = self.queue.get()\n            if p is None:\n                self.workers -= 1\n            else:\n                # This is a real output packet\n                self.output.append(p)\n        print(\"Final string is {} characters long\".format(len(self.output)))\n        print(\"Output thread terminating\")\n","repo_name":"suvari3V/OST_Python4","sub_path":"Lesson_11/Project/output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"4119273164","text":"\"\"\"\nTaken from https://github.com/openai/baselines\n\"\"\"\nfrom enum import Enum, auto\nfrom multiprocessing import Pipe, Process\nfrom multiprocessing.connection import Connection\nfrom typing import Optional\n\nimport numpy as np\n\nfrom envs.base import Env\n\n\nclass CloudpickleWrapper(object):\n    \"\"\"\n    Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)\n    \"\"\"\n\n    def __init__(self, x):\n        self.x = x\n\n    def __getstate__(self):\n        import cloudpickle\n\n        return cloudpickle.dumps(self.x)\n\n    def __setstate__(self, ob):\n        import pickle\n\n        self.x = pickle.loads(ob)\n\n\nclass Command(Enum):\n    ACTION_SPACE = auto()\n    CLOSE = auto()\n    GET_TASK = auto()\n    OBSERVATION_SPACE = auto()\n    RESET = auto()\n    STEP = auto()\n    TASK_SPACE = auto()\n\n\ndef work(env: Env, command: Command, data):\n    if command == Command.ACTION_SPACE:\n        return env.action_space\n    elif command == Command.GET_TASK:\n        return env.get_task()\n    elif command == Command.OBSERVATION_SPACE:\n        return env.observation_space\n    elif command == Command.RESET:\n        return env.reset()\n    elif command == Command.STEP:\n        return env.step(data)\n    elif command == Command.TASK_SPACE:\n        return env.task_space\n    raise RuntimeError(f\"Unknown command {command}\")\n\n\ndef worker(\n    remote: Connection, parent_remote: Connection, env_fn_wrapper: CloudpickleWrapper\n):\n    parent_remote.close()\n    env: Env = env_fn_wrapper.x()\n    try:\n        while True:\n            cmd, data = remote.recv()\n            if cmd == Command.CLOSE:\n                break\n            else:\n                remote.send(work(env, cmd, data))\n    finally:\n        env.close()\n        remote.close()\n\n\nclass SubprocVecEnv:\n    \"\"\"\n    VecEnv that runs multiple envs in parallel in subproceses and communicates with them via pipes.\n    Recommended to use when num_envs > 1 and step() can be a bottleneck.\n    \"\"\"\n\n    def __init__(self, env_fns):\n        \"\"\"\n        Arguments:\n\n        env_fns: iterable of callables - functions that create envs to run in subprocesses. 
Need to be cloud-pickleable\n \"\"\"\n self._n_processes = len(env_fns)\n self.closed = False\n self.waiting = False\n self.remotes: list[Connection]\n self.remotes, self.ps = self.start_processes(env_fns)\n\n def _assert_not_closed(self):\n assert (\n not self.closed\n ), \"Trying to operate on a SubprocVecEnv after calling close()\"\n\n @property\n def action_space(self):\n return self.send_to_first(Command.ACTION_SPACE, None)\n\n @property\n def n_processes(self):\n return self._n_processes\n\n @property\n def observation_space(self):\n return self.send_to_first(Command.OBSERVATION_SPACE, None)\n\n @property\n def task_space(self):\n return self.send_to_first(Command.TASK_SPACE, None)\n\n def close(self):\n if self.waiting:\n for remote in self.remotes:\n remote.recv()\n for remote in self.remotes:\n remote.send((Command.CLOSE, None))\n for p in self.ps:\n p.join()\n self.closed = True\n\n def get_task(self):\n return np.stack(self.send_to_all(Command.GET_TASK, None))\n\n def reset(self, n: Optional[int] = None) -> np.ndarray:\n if n is None:\n return np.stack(self.send_to_all(Command.RESET, None))\n else:\n return self.send_to_nth(n, Command.RESET, None)\n\n def send_to_all(self, command: Command, data):\n self._assert_not_closed()\n if data is None:\n for remote in self.remotes:\n remote.send((command, data))\n else:\n for remote, data in zip(self.remotes, data):\n remote.send((command, data))\n self.waiting = True\n received = [remote.recv() for remote in self.remotes]\n self.waiting = False\n return received\n\n def send_to_first(self, command: Command, data):\n return self.send_to_nth(0, command, data)\n\n def send_to_nth(self, n: int, command: Command, data):\n self._assert_not_closed()\n remote = self.remotes[n]\n remote.send((command, data))\n return remote.recv()\n\n def start_processes(self, env_fns) -> tuple[list, list[Process]]:\n remotes, work_remotes = zip(*[Pipe() for _ in range(len(env_fns))])\n ps = [\n Process(\n target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn))\n )\n for (work_remote, remote, env_fn) in zip(work_remotes, remotes, env_fns)\n ]\n for p in ps:\n # if the main process crashes, we should not cause things to hang\n p.daemon = True\n p.start()\n for remote in work_remotes:\n remote.close()\n return remotes, ps\n\n def step(\n self, actions: np.ndarray\n ) -> tuple[np.ndarray, np.ndarray, np.ndarray, list]:\n obs, rews, dones, infos = zip(*self.send_to_all(Command.STEP, actions))\n return np.stack(obs), np.stack(rews), np.stack(dones), infos\n","repo_name":"ethanabrooks/in-context-rl","sub_path":"src/envs/parallel/subproc_vec_env.py","file_name":"subproc_vec_env.py","file_ext":"py","file_size_in_byte":5114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"14344511931","text":"import sys\nimport pygame\nimport random\n\npygame.init()\n\n# Screen dimensions\nWIDTH, HEIGHT = 800, 600\nGRID_SIZE = 25\n\n# Colors\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nBLUE = (0, 0, 255)\nGREEN = (0, 255, 0)\nCOLORS = [RED, BLUE, GREEN]\n\n# Tetromino shapes\nSHAPES = [\n [\n ['.....',\n '.....',\n '.....',\n 'OOOO.',\n '.....'],\n ['.....',\n '..O..',\n '..O..',\n '..O..',\n '..O..']\n ],\n [\n ['.....',\n '.....',\n '..O..',\n '.OOO.',\n '.....'],\n ['.....',\n '..O..',\n '.OO..',\n '..O..',\n '.....'],\n ['.....',\n '.....',\n '.OOO.',\n '..O..',\n '.....'],\n ['.....',\n '..O..',\n '..OO.',\n '..O..',\n '.....']\n ],\n [\n [\n '.....',\n '.....',\n '..OO.',\n '.OO..',\n 
'.....'],\n        ['.....',\n     '.....',\n     '.OO..',\n     '..OO.',\n     '.....'],\n        ['.....',\n     '.O...',\n     '.OO..',\n     '..O..',\n     '.....'],\n        ['.....',\n     '..O..',\n     '.OO..',\n     '.O...',\n     '.....']\n    ],\n    [\n        ['.....',\n     '..O..',\n     '..O..',\n     '..OO.',\n     '.....'],\n        ['.....',\n     '...O.',\n     '.OOO.',\n     '.....',\n     '.....'],\n        ['.....',\n     '.OO..',\n     '..O..',\n     '..O..',\n     '.....'],\n        ['.....',\n     '.....',\n     '.OOO.',\n     '.O...',\n     '.....']\n    ],\n]\n\n\nclass Tetromino:\n    def __init__(self, x, y, shape):\n        self.x = x\n        self.y = y\n        self.shape = shape\n        self.color = random.choice(COLORS)  # You can choose different colors for each shape\n        self.rotation = 0\n\n\nclass Tetris:\n    def __init__(self, width, height):\n        self.width = width\n        self.height = height\n        self.grid = [[0 for _ in range(width)] for _ in range(height)]\n        self.current_piece = self.new_piece()\n        self.game_over = False\n        self.score = 0  # Add score attribute\n\n    def new_piece(self):\n        # Choose a random shape\n        shape = random.choice(SHAPES)\n        # Return a new Tetromino object\n        return Tetromino(self.width // 2, 0, shape)\n\n    def valid_move(self, piece, x, y, rotation):\n        \"\"\"Check if the piece can move to the given position\"\"\"\n        for i, row in enumerate(piece.shape[(piece.rotation + rotation) % len(piece.shape)]):\n            for j, cell in enumerate(row):\n                try:\n                    if cell == 'O' and (self.grid[piece.y + i + y][piece.x + j + x] != 0):\n                        return False\n                except IndexError:\n                    return False\n        return True\n\n    def clear_lines(self):\n        \"\"\"Clear the lines that are full and return the number of cleared lines\"\"\"\n        lines_cleared = 0\n        for i, row in enumerate(self.grid[:-1]):\n            if all(cell != 0 for cell in row):\n                lines_cleared += 1\n                del self.grid[i]\n                self.grid.insert(0, [0 for _ in range(self.width)])\n        return lines_cleared\n\n    def lock_piece(self, piece):\n        \"\"\"Lock the piece in place and create a new piece\"\"\"\n        for i, row in enumerate(piece.shape[piece.rotation % len(piece.shape)]):\n            for j, cell in enumerate(row):\n                if cell == 'O':\n                    self.grid[piece.y + i][piece.x + j] = piece.color\n        # Clear the lines and update the score\n        lines_cleared = self.clear_lines()\n        self.score += lines_cleared * 100  # Update the score based on the number of cleared lines\n        # Create a new piece\n        self.current_piece = self.new_piece()\n        # Check if the game is over\n        if not self.valid_move(self.current_piece, 0, 0, 0):\n            self.game_over = True\n        return lines_cleared\n\n    def update(self):\n        \"\"\"Move the tetromino down one cell\"\"\"\n        if not self.game_over:\n            if self.valid_move(self.current_piece, 0, 1, 0):\n                self.current_piece.y += 1\n            else:\n                self.lock_piece(self.current_piece)\n\n    def draw(self, screen):\n        \"\"\"Draw the grid and the current piece\"\"\"\n        for y, row in enumerate(self.grid):\n            for x, cell in enumerate(row):\n                if cell:\n                    pygame.draw.rect(screen, cell, (x * GRID_SIZE, y * GRID_SIZE, GRID_SIZE - 1, GRID_SIZE - 1))\n\n        if self.current_piece:\n            for i, row in enumerate(self.current_piece.shape[self.current_piece.rotation % len(self.current_piece.shape)]):\n                for j, cell in enumerate(row):\n                    if cell == 'O':\n                        pygame.draw.rect(screen, self.current_piece.color, ((self.current_piece.x + j) * GRID_SIZE, (self.current_piece.y + i) * GRID_SIZE, GRID_SIZE - 1, GRID_SIZE - 1))\n\n\ndef draw_score(screen, score, x, y):\n    \"\"\"Draw the score on the screen\"\"\"\n    font = pygame.font.Font(None, 36)\n    text = font.render(f\"Score: {score}\", True, WHITE)\n    screen.blit(text, (x, y))\n    \n    \ndef draw_game_over(screen, x, y):\n    \"\"\"Draw the game over text on the screen\"\"\"\n    font = 
pygame.font.Font(None, 48)\n text = font.render(\"Game Over\", True, RED)\n screen.blit(text, (x, y))\n\n\ndef main():\n # Initialize pygame\n screen = pygame.display.set_mode((WIDTH, HEIGHT))\n pygame.display.set_caption('Tetris')\n # Create a clock object\n clock = pygame.time.Clock()\n # Create a Tetris object\n game = Tetris(WIDTH // GRID_SIZE, HEIGHT // GRID_SIZE)\n fall_time = 0\n fall_speed = 50 # You can adjust this value to change the falling speed, it's in milliseconds\n while True:\n # Fill the screen with black\n screen.fill(BLACK) \n for event in pygame.event.get():\n # Check for the QUIT event\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n # Check for the KEYDOWN event\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n if game.valid_move(game.current_piece, -1, 0, 0):\n game.current_piece.x -= 1 # Move the piece to the left\n if event.key == pygame.K_RIGHT:\n if game.valid_move(game.current_piece, 1, 0, 0):\n game.current_piece.x += 1 # Move the piece to the right\n if event.key == pygame.K_DOWN:\n if game.valid_move(game.current_piece, 0, 1, 0):\n game.current_piece.y += 1 # Move the piece down\n if event.key == pygame.K_UP:\n if game.valid_move(game.current_piece, 0, 0, 1):\n game.current_piece.rotation += 1 # Rotate the piece\n if event.key == pygame.K_SPACE:\n while game.valid_move(game.current_piece, 0, 1, 0):\n game.current_piece.y += 1 # Move the piece down until it hits the bottom\n game.lock_piece(game.current_piece) # Lock the piece in place\n # Get the number of milliseconds since the last frame\n delta_time = clock.get_rawtime() \n # Add the delta time to the fall time\n fall_time += delta_time \n if fall_time >= fall_speed:\n # Move the piece down\n game.update()\n # Reset the fall time\n fall_time = 0\n # Draw the score on the screen\n draw_score(screen, game.score, 10, 10)\n # Draw the grid and the current piece\n game.draw(screen)\n if game.game_over:\n # Draw the \"Game Over\" message\n draw_game_over(screen, WIDTH // 2 - 100, HEIGHT // 2 - 30) # Draw the \"Game Over\" message\n # You can add a \"Press any key to restart\" message here\n # Check for the KEYDOWN event\n if event.type == pygame.KEYDOWN:\n # Create a new Tetris object\n game = Tetris(WIDTH // GRID_SIZE, HEIGHT // GRID_SIZE)\n # Update the display\n pygame.display.flip()\n # Set the framerate\n clock.tick(60)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"x4nth055/pythoncode-tutorials","sub_path":"gui-programming/tetris-game/tetris.py","file_name":"tetris.py","file_ext":"py","file_size_in_byte":8299,"program_lang":"python","lang":"en","doc_type":"code","stars":1875,"dataset":"github-code","pt":"4"} +{"seq_id":"21974347492","text":"# -*- coding:utf-8 -*-\n# demo2-hough.py\n# zhengyinloong\n# 2023/04/28 07:29\n\"\"\"\nHough line detection\n\"\"\"\nfrom mycv import *\n\n\nif __name__==\"__main__\":\n\n IMG1 = \"./10.jpg\"\n IMG2 = \"./11.jpg\"\n IMG3 = \"./12.jpg\"\n IMGS = [IMG1,IMG2,IMG3]\n\n for i,IMG in enumerate(IMGS):\n img = cv.imread(IMG, cv.IMREAD_GRAYSCALE)\n # Canny edge detection\n edge = edge_detecting(img)\n # Hough line detection\n lines = get_lines(edge)\n # Draw the lines\n img = cv.imread(IMG)\n hough = draw_lines(img, lines)\n # Display\n cv.imshow(f'edge{i+1}', edge)\n cv.imshow(f'hough{i+1}', hough)\n cv.resizeWindow(f'edge{i+1}', 350, 350)\n cv.resizeWindow(f'hough{i+1}', 350, 350)\n # Save\n cv.imwrite(f'edge{i+1}.jpg', edge, [cv.IMWRITE_JPEG_QUALITY, 100])\n cv.imwrite(f'hough{i+1}.jpg', hough, [cv.IMWRITE_JPEG_QUALITY, 100])\n\n # Wait until a key is pressed\n cv.waitKey(0)\n\n 
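# (waitKey(0) above blocks until any key is pressed, keeping the result windows open)\n 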
# Destroy all the windows opened before\n cv.destroyAllWindows()\n\n\n\n","repo_name":"zhengyinloong/IPMV","sub_path":"test2/demo2-hough.py","file_name":"demo2-hough.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"32087477807","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 3 13:51:27 2022\n\n@author: SebastianCB-dev\n\"\"\"\n\ndef calcular_cambio2(pesos:int)->str:\n cambio = ''\n while pesos > 0:\n if pesos - 500 >= 0:\n cambio += 'A,' \n pesos -= 500\n elif pesos - 200 >= 0:\n cambio += 'B,' \n pesos -= 200\n elif pesos - 100 >= 0:\n cambio += 'C,' \n pesos -= 100\n elif pesos - 50 >= 0:\n cambio += 'D,'\n pesos -= 50 \n if len(cambio) > 1:\n cambio = cambio[:-1]\n return cambio\n\n#result = calcular_cambio2(450)\n#print(f'Su cambio es: {result}')\n\n\n\ndef calcular_cambio(pesos:int)->str:\n quantityA = 0\n quantityB = 0\n quantityC = 0\n quantityD = 0\n while pesos > 0:\n if pesos - 500 >= 0:\n quantityA += 1 \n pesos -= 500\n elif pesos - 200 >= 0:\n quantityB += 1 \n pesos -= 200\n elif pesos - 100 >= 0:\n quantityC += 1 \n pesos -= 100\n elif pesos - 50 >= 0:\n quantityD += 1 \n pesos -= 50 \n \n return f'{quantityA},{quantityB},{quantityC},{quantityD}'\n\nresult = calcular_cambio(850)\nprint(f'Su cambio es: {result}')","repo_name":"SebastianCB-dev/python-universidad-andes","sub_path":"Modulo1/ejercicioMaquinaEspendedora.py","file_name":"ejercicioMaquinaEspendedora.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"8008553506","text":"import autograd\nimport autograd.numpy as np\nimport autograd.scipy as sp\nimport autograd.misc.optimizers\nimport numpy\nimport time\nimport os\nimport sys\nfrom tqdm import tqdm\nfrom matplotlib import pyplot as plt\nfrom autograd.misc.optimizers import adam\n#from pynverse import inversefunc\n\ne_bound = []\njoint_probs = []\nflow_probs = []\ngrad_norms = []\nm = 10000\nstart = time.time()\nq_0_mu = np.array([0,0])\nq_0_sigma = 1\nD = q_0_mu.shape[0]\ndef callback(x, i, g, num_samples=100):\n '''\n Callback function used in Adam solver. 
Has functionality to plot intermediate steps\n and do progress bar\n '''\n grad_norms.append(np.linalg.norm(g))\n if(i%10 == 0):\n left = '['\n right = ']'\n eq = '=' * int(20*i/m)\n blank = ' ' * int(np.ceil(20*(1 - i/m)))\n sys.stdout.write(\"{0}{1}{2}{3} {4:.3f}% {5:.2f}s\\r\".format(\n left, eq, blank, right, 100*i/m, time.time()-start))\n sys.stdout.flush()\n if(i==(m-1)):\n sys.stdout.write(\"{}\\r\".format(' '*50))\n sys.stdout.flush()\n print(\"[{}] 100% {}\".format(20*'=', time.time() - start))\n if(i%10 == 0):\n if(i==0):\n leading_zeros = int(np.log(m)/np.log(10))\n elif(i==1000):\n leading_zeros = int(np.log(m)/np.log(10)) - int(np.log(i)/np.log(10)) - 1\n # else:\n # leading_zeros = int(np.log(m)/np.log(10)) - int(np.log(i)/np.log(10))\n # zeros = '0'*leading_zeros\n # new_samples = np.random.randn(num_samples)[:,np.newaxis]\n # #new_samples = np.random.uniform(-1, 1, num_samples)[:,np.newaxis]\n # #new_samples = np.random.multivariate_normal(q_0_mu, q_0_sigma*np.eye(D), num_samples)\n # flowed_samples = flow_samples(x, new_samples, np.tanh)\n # fig, ax = plt.subplots()\n # ax.hist(flowed_samples, bins=int(np.sqrt(num_samples)), density=True)\n # #ax = setup_plot(u1)\n # #ax.scatter(flowed_samples[:,0], flowed_samples[:,1], alpha=0.4)\n # #ax.set(xlim=(-4,4), ylim=(-4,4), title=\"{} Flows, Iteration {}\".format(20, i))\n # #plt.savefig(\"./data_fit_1d/{}{}.png\".format(zeros, i))\n # plt.close()\n\n\n###\n# Toy density functions\n###\ndef w1(z):\n return np.sin(2*np.pi*z/4)\n\ndef w2(z):\n return 3*np.exp(-1/2*((z-1)/0.6)**2)\n\ndef u1(z, N=1):\n exp_factor = 1/2*((np.linalg.norm(z, axis=2) - 2)/0.4)**2 - \\\n np.log(np.exp(-1/2*((z[:,:,0] - 2)/0.6)**2) + np.exp(-1/2*((z[:,:,0] + 2)/0.6)**2))\n return N * np.exp(-exp_factor)\n\n\ndef u2(z, N=1):\n exp_factor = 1/2*((z[:,:,1] - np.sin(2*np.pi*z[:,:,0]/4))/0.4)**2\n return np.exp(-exp_factor)\n\n\ndef u3(z, N=1):\n exp_factor = -np.log(np.exp(-1/2*((z[:,:,1] - w1(z[:,:,0]))/0.35)**2) + \\\n np.exp(-1/2*((z[:,:,1] - w1(z[:,:,0]) + w2(z[:,:,0]))/0.35)**2))\n return np.exp(-exp_factor)\n\n\ndef setup_plot(u_func):\n '''\n Function used to set up plot of target density, returns axis object for additional\n plotting\n '''\n try:\n X, Y = numpy.mgrid[-4:4:0.05, -4:4:0.05]\n dat = np.dstack((X, Y))\n U_z1 = u_func(dat)\n \n fig, ax = plt.subplots()\n ax.contourf(X, Y, U_z1, cmap='Reds', levels=15)\n except (TypeError, ValueError):\n plt.close()\n x = np.linspace(-8, 8, 1000)\n fig, ax = plt.subplots()\n ax.plot(x, u_func(x), label=\"Target Distribution\")\n ax.set(title=\"Comparison of Target Density and Flowed Samples\")\n return ax\n\n\ndef plot_shape():\n '''\n Simply plots target density\n '''\n ax = setup_plot(u_func)\n plt.show()\n\n\ndef plot_shape_samples(samples, u_func):\n '''\n plots target density and samples\n '''\n ax = setup_plot(u_func)\n ax.scatter(samples[:, 0], samples[:, 1], alpha=.5)\n plt.show()\n\n\ndef flow_once(lambda_flow, z, h):\n '''\n Flow one planar tranfsormation flow\n '''\n D = (lambda_flow.shape[0]-1)//2\n return z + np.dot(h(np.dot(z, lambda_flow[D:2*D].reshape(-1, 1))+lambda_flow[-1]), \\\n lambda_flow[:D].reshape(1, -1))\n\n\n# lambda u, w, b\ndef flow_samples(lambda_flows, z, h):\n '''\n Transform sample through multiple flows\n '''\n D = (lambda_flows.shape[1]-1)//2\n for lambda_flow in lambda_flows:\n z = flow_once(lambda_flow, z, h)\n return z\n\n\n# Psi\ndef psi(lambda_flow, z, h):\n '''\n Computes log-det-jacobian according to formula from the paper\n '''\n D = (lambda_flow.shape[0]-1)//2\n 
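# assuming h = tanh (the h used throughout this script), 1 - h(x)**2 equals h'(x),\n # so this returns psi(z) = h'(w.z + b) * w, the planar-flow term whose dot product\n # with u below gives the log-det-Jacobian |1 + u.psi(z)|\n 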
return (1-h(np.dot(z, lambda_flow[D:2*D].reshape(-1, 1))+lambda_flow[-1])**2) * \\\n lambda_flow[D:2*D]\n\n\n# Calculate energy bound\ndef energy_bound(lambda_flows, z, h, u_func, beta=1., bnn=False):\n '''\n Energy bound formula from the paper. We exclude the initial sampling contribution\n because it is independent of flow parameters.\n '''\n D = (lambda_flows.shape[1]-1)//2\n #initial_exp = np.mean(np.log(sp.stats.norm.pdf(z, loc=q_0_mu, scale=np.sqrt(q_0_sigma))))\n initial_exp = 0\n if(bnn):\n joint_exp = beta*np.mean(u_func(flow_samples(lambda_flows, z, h)))\n else:\n joint_exp = beta*np.mean(np.log(u_func(flow_samples(lambda_flows, z, h\n ).reshape(1, -1, 2))))\n #print(\"JOINT EXP: {}\".format(joint_exp))\n\n # log-det-jacobian contribution from the paper\n flow_exp = 0\n for k, lambda_flow in enumerate(lambda_flows):\n flow_exp = flow_exp + \\\n np.mean(np.log(np.abs(1 + np.dot(psi(lambda_flow, z, h), lambda_flow[:D]))))\n z = flow_once(lambda_flow, z, h)\n\n # Store probabilities for plotting and analysis\n e_bound.append((initial_exp - joint_exp - flow_exp)._value)\n joint_probs.append(joint_exp._value)\n flow_probs.append(flow_exp._value)\n return initial_exp - joint_exp - flow_exp\n\n\ndef get_joint_exp(lambda_flows, z, h, u_func):\n '''\n Get joint contribution to energy for gradient descent according to formula\n from the paper\n '''\n return np.mean(np.log(u_func(flow_samples(lambda_flows, z, h).reshape(1, -1, 2))))\n\n\ndef get_flow_exp(lambda_flows, z, h):\n '''\n Get flow contribution to energy function for gradient descent\n '''\n D = (lambda_flows.shape[1]-1)//2\n flow_exp = 0\n for lambda_flow in lambda_flows:\n flow_exp = flow_exp + \\\n np.mean(np.log(np.abs(1 + np.dot(psi(lambda_flow, z, h), lambda_flow[:D]))))\n z = flow_once(lambda_flow, z, h)\n return flow_exp\n\n\ndef gradient_descent(m, lambda_flows, grad_energy_bound, samples):\n '''\n Gradient descent for finding parameters. 
This may not work anymore since switching over\n to the Adam optimizer.\n '''\n energy_hist = np.empty(m)\n joint_hist = np.empty(m)\n flow_hist = np.empty(m)\n lambda_hist = np.empty((m, *lambda_flows.shape))\n samples_flowed = samples\n for i in tqdm(range(m)):\n beta = min(1, 0.01+i/10000)\n samples_flowed = flow_samples(lambda_flows, samples, h)\n \n gradient = grad_energy_bound(lambda_flows, samples, h, beta)\n lambda_flows -= step_size*gradient\n #lambda_flows = autograd.misc.optimizers.adam(grad_energy_bound, lambda_flows)\n \n # Debug\n energy_hist[i] = energy_bound(lambda_flows, samples, h)\n joint_hist[i] = get_joint_exp(lambda_flows, samples, h)\n flow_hist[i] = get_flow_exp(lambda_flows, samples, h)\n lambda_hist[i] = lambda_flows\n \n # Plot\n if i % 20 == 0:\n if(i==0):\n leading_zeros = int(np.log(m)/np.log(10))\n elif(i==1000):\n leading_zeros = int(np.log(m)/np.log(10)) - int(np.log(i)/np.log(10)) - 1\n else:\n leading_zeros = int(np.log(m)/np.log(10)) - int(np.log(i)/np.log(10))\n zeros = '0'*leading_zeros\n\n ax = setup_plot(u_func)\n ax.scatter(samples_flowed[:, 0], samples_flowed[:, 1], alpha=.5)\n plt.savefig(\"./plots/{}{}.png\".format(zeros, i))\n plt.close()\n\n\ndef adam_solve(lambda_flows, grad_energy_bound, samples, u_func, h, m=1000, step_size=0.001,\n bnn=False):\n '''\n Uses adam solver to optimize the energy bound\n '''\n output = np.copy(lambda_flows) # Copies to avoid changing initial conditions\n print(\"BEFORE LEARNING:\\n{}\".format(output))\n grad_energy_bound = autograd.grad(energy_bound) # Autograd gradient of energy\n g_eb = lambda lambda_flows, i: grad_energy_bound(lambda_flows, samples, h, u_func, \n #beta= (0.1 + i/1000))\n beta=min(2, i/1000), # Annealing\n #beta=min(1, 0.01+i/10000),\n bnn=bnn) # Annealing\n output = adam(g_eb, output, num_iters=m, callback=callback, step_size=step_size)\n print(\"\\nAFTER LEARNING:\\n{}\".format(output))\n\n #samples = np.random.randn(30000)[:,np.newaxis] # Plot with more samples for better clarity\n q_0_mu = np.array([0,0])\n q_0_sigma = 1\n D = q_0_mu.shape[0]\n #samples = np.random.multivariate_normal(q_0_mu, q_0_sigma*np.eye(D), 20000)\n\n samples_flowed = flow_samples(output, samples, h)\n #np.savetxt(\"./data_fit_1d/flow_params.txt\", output)\n np.savetxt(\"./nn_fit/flow_params.txt\", output)\n if(bnn):\n np.savetxt(\"./nn_fit/energy_bound.txt\", e_bound)\n fig, ax = plt.subplots()\n ax.plot(e_bound)\n ax.set(title=\"Energy Bound\")\n plt.savefig(\"./nn_fit/energy_bound.png\")\n plt.close()\n\n np.savetxt(\"./nn_fit/joint_probs.txt\", joint_probs)\n fig, ax = plt.subplots()\n ax.plot(joint_probs)\n ax.set(title=\"Joint Probability\")\n plt.savefig(\"./nn_fit/joint_probs.png\")\n plt.close()\n\n np.savetxt(\"./nn_fit/flow_probs.txt\", flow_probs)\n fig, ax = plt.subplots()\n ax.plot(flow_probs)\n ax.set(title=\"Flow Probs\")\n plt.savefig(\"./nn_fit/flow_probs.png\")\n plt.close()\n\n np.savetxt(\"./nn_fit/grad_norms.txt\", grad_norms)\n fig, ax = plt.subplots()\n ax.plot(grad_norms)\n ax.set(title=\"Gradient Norms\")\n plt.savefig(\"./nn_fit/grad_norms.png\")\n plt.close()\n\n\n return samples_flowed\n\n\ndef shape_fit_2d(m, step_size, u_func, num_flows=8, num_samples=1000):\n # Parameters\n h = np.tanh\n \n q_0_mu = np.array([0,0])\n q_0_sigma = 1\n D = q_0_mu.shape[0]\n\n # 2D flows\n #lambda_flows = np.array([np.array([1., 0., 4., 5., 0.])]*num_flows)\n lambda_flows = np.array([np.array([1., 1., 0., 0., 0.])]*num_flows)\n\n # 2D samples\n samples = np.random.multivariate_normal(q_0_mu, 
q_0_sigma*np.eye(D), num_samples)\n\n # Gradient of energy function -> used to minimize energy\n grad_energy_bound = autograd.grad(energy_bound)\n\n #gradient_descent(m, lambda_flows, grad_energy_bound, samples)\n flowed_samples = adam_solve(lambda_flows, grad_energy_bound, samples,\n u_func, h, m, step_size)\n\n os.system(\"cd ./plots/ ; convert -delay 10 -loop 0 *.png learning_flows.gif\")\n # Plot Transformed samples\n ax = setup_plot(u_func)\n #print(samples_flowed.shape)\n ax.scatter(flowed_samples[:,0], flowed_samples[:,1], alpha=0.2)\n ax.set(title=\"{} Flows, {} Iterations, {} Step Size\".format(num_flows, m, step_size),\n xlim=(-4,4), ylim=(-4,4))\n plt.savefig(\"./2d_plots/adam_fit_test.png\")\n\n # Convert plots to gif or mp4\n #plot_str = \"cd ./plots/ ; ffmpeg -pattern_type glob -i \\\"*.png\\\" -c:v \"\n #plot_str += \"libx264 -pix_fmt yuv420p -movflags +faststart learning_flows.mp4\"\n #os.system(plot_str)\n\n\ndef shape_fit_1d(m, step_size, u_func, num_flows=8, num_samples=1000):\n # Parameters\n h = np.tanh\n \n q_0_mu = np.array([0,0])\n q_0_sigma = 10\n D = q_0_mu.shape[0]\n\n # flows\n #lambda_flows = np.array([np.array([1., 1., 1., 1., 0.])])\n #lambda_flows = np.array([np.array([1., 1., 0.])]*num_flows)\n lambda_flows = np.loadtxt(\"./data_fit_1d/flow_params.txt\")\n\n # 1D samples\n samples = np.random.randn(num_samples)[:,np.newaxis]\n #samples = np.random.uniform(-1, 1, num_samples)[:,np.newaxis]\n \n start = time.time()\n grad_energy_bound = autograd.grad(energy_bound)\n\n # JOINT PROBABILITY IS NEW U_FUNC\n #print(energy_bound(lambda_flows, samples, h, u_func))\n\n #target = lambda x: (sp.stats.norm.pdf((x-2)) + sp.stats.norm.pdf((x+2)))/2\n\n #gradient_descent(m, lambda_flows, grad_energy_bound, samples)\n flowed_samples = adam_solve(lambda_flows, grad_energy_bound, samples,\n u_func, h, m, step_size)\n\n # Plot Transformed samples\n ax = setup_plot(u_func)\n ax.hist(flowed_samples, bins=100, alpha=0.5, density=True, label=\"Transformed Samples\")\n #plt.savefig(\"./plots/adam_fit_test.png\")\n ax.legend(loc='best')\n plt.savefig(\"./data_fit_1d/adam_fit.png\")\n\n # Convert plots to gif or mp4\n #os.system(\"cd ./plots/ ; convert -delay 10 -loop 0 *.png learning_flows.gif\")\n #plot_str = \"cd ./plots/ ; ffmpeg -pattern_type glob -i \\\"*.png\\\" -c:v \"\n #plot_str += \"libx264 -pix_fmt yuv420p -movflags +faststart learning_flows.mp4\"\n #os.system(plot_str)\n\n\nif __name__ == '__main__':\n m = 14000\n m = 10000\n m = 1\n u_func = u1 # 2D Shape fit\n #u_func = lambda x: (sp.stats.norm.pdf((x-4)) + sp.stats.norm.pdf((x+4)))/2 # 1D Shape fit\n #u_func = lambda x: sp.stats.gamma.pdf(x, 1)\n #u_func = lambda x: sp.stats.laplace.pdf(x, 4)\n u_func = lambda x: (1/2*np.exp(-np.abs(x-2)) + 1/2*np.exp(-np.abs(x)) + \\\n 1/2*np.exp(-np.abs(x+2)))/3\n\n #target = lambda x: (sp.stats.norm.pdf((x-4)) + sp.stats.norm.pdf((x+4)))/2 # 1D Shape fit\n #u_func = lambda x, z: (sp.stats.norm.pdf(x-4-z) + sp.stats.norm.pdf(x-4-z))/2 * \\\n # (sp.stats.norm.pdf(x-4) + sp.stats.norm.pdf(x-4))/2\n print(\"SAMPLING\")\n\n num_samples = 20000\n num_samples = 2000\n #x_dat = sample(target, 1, -8, 8, num_samples)\n\n step_size = .0002\n num_flows = 20\n start = time.time()\n #shape_fit_2d(m, step_size, u_func, num_flows, num_samples)\n shape_fit_1d(m, step_size, u_func, num_flows, num_samples)\n \n fig, ax = plt.subplots(nrows=4, figsize=(8,20))\n ax[0].plot(grad_norms, label=\"Norm of gradient\")\n ax[0].legend(loc='best')\n ax[1].plot(e_bound, label=\"Energy Bound\")\n 
ax[1].legend(loc='best')\n ax[2].plot(joint_probs, label=\"Joint Probability\")\n ax[2].legend(loc='best')\n ax[3].plot(flow_probs, label=\"Flow Probability\")\n ax[3].legend(loc='best')\n #plt.savefig(\"./data_fit_1d/probabilities.png\")\n plt.savefig(\"./2d_plots/probabilities.png\")\n plt.show()\n\n","repo_name":"onefishy/am207_fall19_projects","sub_path":"normalizing_flow/normalizing_flow_2/scripts/flows.py","file_name":"flows.py","file_ext":"py","file_size_in_byte":14801,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"49"} +{"seq_id":"70189445590","text":"# -*- coding: utf-8 -*-\n\"\"\"Test ApiGW\"\"\"\nimport pytest\nfrom ddd_nginx.upstream import Upstream\n\n\ndef test_create_upstream():\n up = Upstream(name=\"upstream name\")\n up.append(\"server url\")\n\n assert up is not None\n assert up.id is not None\n assert up.name == \"upstream name\"\n assert up.servers[0] == \"server url\"\n\n\n@pytest.mark.usefixtures(\"upstream_conf\")\ndef test_dump_upstream(upstream_conf):\n up = Upstream(\n name=\"warehouse_inventory\",\n )\n up.append(\"10.0.0.1:80\")\n up.append(\"10.0.0.2:80\")\n up.append(\"10.0.0.3:80\")\n\n assert up.dump(\"upstream.conf.jinja2\", {\n \"name\": up.name,\n \"servers\": up.servers,\n }) == upstream_conf\n","repo_name":"sunwei/ddd-nginx","sub_path":"tests/test_upstream.py","file_name":"test_upstream.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"27401511320","text":"# helper functions\nfrom sklearn.base import BaseEstimator, TransformerMixin\nimport numpy as np\nfrom collections import defaultdict\nimport pandas as pd\n\ndef create_validation_submission(df, week):\n val_week_purchases_by_cust = defaultdict(list)\n valid = df.copy()\n valid.t_dat = pd.to_datetime( valid.t_dat )\n valid = valid.loc[ valid.week == week ]\n valid = valid.rename({'article_id':'prediction'},axis=1)\n valid = valid.groupby('customer_id').prediction.apply(list).to_dict()\n \n return valid\n\ndef apk(actual, predicted, k=10, perfect=False):\n \"\"\"\n Computes the average precision at k.\n\n This function computes the average precision at k between two lists of\n items.\n\n Parameters\n ----------\n actual : list\n A list of elements that are to be predicted (order doesn't matter)\n predicted : list\n A list of predicted elements (order does matter)\n k : int, optional\n The maximum number of predicted elements\n\n Returns\n -------\n score : double\n The average precision at k over the input lists\n\n \"\"\"\n if not perfect and len(predicted)>k:\n predicted = predicted[:k]\n\n score = 0.0\n num_hits = 0.0\n if perfect:\n for i, a in enumerate(actual[:k]):\n if a in predicted and a not in actual[:i]:\n score += 1 \n else:\n for i,p in enumerate(predicted):\n if p in actual and p not in predicted[:i]:\n num_hits += 1.0\n score += num_hits / (i+1.0)\n\n if not actual:\n return 0.0\n\n return score / min(len(actual), k)\n\ndef mapk(actual, predicted, k=10):\n \"\"\"\n Computes the mean average precision at k.\n\n This function computes the mean average precision at k between two lists\n of lists of items.\n\n Parameters\n ----------\n actual : list\n A list of lists of elements that are to be predicted \n (order doesn't matter in the lists)\n predicted : list\n A list of lists of predicted elements\n (order matters in the lists)\n k : int, optional\n The maximum number of predicted elements\n\n Returns\n -------\n score : double\n The 
mean average precision at k over the input lists\n\n \"\"\"\n return np.mean([apk(a,p,k) for a,p in zip(actual, predicted)])\n\n# https://www.kaggle.com/c/h-and-m-personalized-fashion-recommendations/discussion/308635\ndef customer_hex_id_to_int(series):\n return series.str[-16:].apply(hex_id_to_int)\n\ndef hex_id_to_int(str):\n return int(str[-16:], 16)\n\ndef article_id_str_to_int(series):\n return series.astype('int32')\n\ndef article_id_int_to_str(series):\n return '0' + series.astype('str')\n\nclass Categorize(BaseEstimator, TransformerMixin):\n def __init__(self, min_examples=0):\n self.min_examples = min_examples\n self.categories = []\n \n def fit(self, X):\n for i in range(X.shape[1]):\n vc = X.iloc[:, i].value_counts()\n self.categories.append(vc[vc > self.min_examples].index.tolist())\n return self\n\n def transform(self, X):\n data = {X.columns[i]: pd.Categorical(X.iloc[:, i], categories=self.categories[i]).codes for i in range(X.shape[1])}\n return pd.DataFrame(data=data)","repo_name":"jimmy86204/kaggle-H-M-Personalized-Fashion-Recommendations","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"33337437562","text":"from PySide6 import QtCore, QtWidgets, QtGui\n\nfrom gui.design.WidgetTitleBar import Ui_TitleBar as Bar\n\n\nclass TitleBar(Bar, QtWidgets.QWidget):\n\n def __init__(self, parent: QtWidgets.QMainWindow | QtWidgets.QDialog):\n super().__init__()\n self.setupUi(self)\n self.parent = parent\n self.WindowMinButton.clicked.connect(self.parent.showMinimized)\n self.WindowMaxButton.clicked.connect(self.maxOrNormal)\n self.WindowCloseButton.clicked.connect(self.queryExit)\n # Toggle between maximized and normal window size\n\n def maxOrNormal(self):\n if self.parent.isMaximized():\n self.parent.showNormal()\n else:\n self.parent.showMaximized()\n\n # Pop up a warning dialog to confirm whether to close\n def queryExit(self):\n self.parent.close()\n\n def mouseDoubleClickEvent(self, a0: QtGui.QMouseEvent): # Mouse double-click event\n if a0.button() == QtCore.Qt.MouseButton.LeftButton:\n self.maxOrNormal()\n","repo_name":"Icexbb/SekaiSubtitle-Python","sub_path":"src/gui/widgets/widget_titlebar.py","file_name":"widget_titlebar.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"49"} +{"seq_id":"28056660762","text":"from abc import ABCMeta, abstractmethod\nfrom enum import Enum\nfrom itertools import chain\n\nimport numpy as np\n\nfrom fuzzy_logic.utils import rotate_matrix, contains_any_no_voids, \\\n first_values_class\n\n\nclass Criteria(Enum):\n __metaclass__ = ABCMeta\n\n @staticmethod\n @abstractmethod\n def get_fuzzy_membership_map():\n raise NotImplementedError()\n\n @classmethod\n def get_involved_terms(cls, *values):\n actions_map = cls.get_fuzzy_membership_map()\n xs = np.array(values)\n involved_terms = set()\n\n for term, fuzzy_fn in actions_map.items():\n fuzzy_membership_values = fuzzy_fn(xs)\n\n if contains_any_no_voids(fuzzy_membership_values):\n involved_terms.add(term)\n\n return involved_terms\n\n def call_fuzzy_membership_fn(self, *values):\n xs = np.array(values)\n fuzzy_membership_fn = self.get_fuzzy_membership_map()[self]\n\n return fuzzy_membership_fn(xs)\n\n def get_sample_cut_of_ys(self, min_value, num_of_samples):\n xs = np.linspace(0, 1, num_of_samples)\n ys = self.call_fuzzy_membership_fn(*xs)\n\n return [y if y <= min_value else min_value for y in ys]\n\n\nclass MultiCriteriaAffiliationSelector(object):\n 
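\"\"\"Select a fuzzy affiliation term by combining criterion values with expert\n rules: min activation per rule, max aggregation across rules, and a\n centroid-style defuzzification of the aggregated membership curve.\"\"\"\n\n 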
def __init__(self, criterion_values, expert_rules, num_of_samples):\n self.__criteria = criterion_values\n self.__expert_rules = expert_rules\n self.__num_of_samples = num_of_samples\n self.__affirmation_criteria = first_values_class(expert_rules)\n\n def membership_criteria(self):\n selected_terms = self.get_terms()\n involved_rules = self.select_involved_rules(selected_terms)\n affirmation_map = self.__build_affirmation_map(involved_rules)\n values = self.__calculate_affirmation_rule_values(affirmation_map)\n total_affirmation_value = self.__total_affirmation_value(values)\n\n return self.__get_membership_criteria(total_affirmation_value)\n\n def affiliation_criterion(self):\n criteria = self.membership_criteria()\n\n return max(criteria, key=criteria.get)\n\n def get_terms(self):\n terms = set()\n\n for criterion, value in self.__criteria.items():\n criterion_terms = criterion.get_involved_terms(value)\n terms.update(criterion_terms)\n\n return terms\n\n def select_involved_rules(self, involved_terms):\n return {rule_terms: affirmation\n for rule_terms, affirmation in self.__expert_rules.items()\n if len(set(rule_terms) - involved_terms) == 0}\n\n def __get_membership_criteria(self, value):\n affirmation_criteria = self.__affirmation_criteria\n criteria = {}\n\n for term in affirmation_criteria:\n values = affirmation_criteria.call_fuzzy_membership_fn(term, value)\n criteria[term], *_ = values\n\n return criteria\n\n def __get_min_membership_term_value(self, terms):\n values = (term.call_fuzzy_membership_fn(self.__criteria[type(term)])\n for term in terms)\n\n return min(chain.from_iterable(values))\n\n def __build_affirmation_map(self, rules):\n affirmation_map = {}\n\n for rule_terms, affirmation_term in rules.items():\n min_value = self.__get_min_membership_term_value(rule_terms)\n value = affirmation_term.get_sample_cut_of_ys(\n min_value, self.__num_of_samples)\n\n affirmation_map[rule_terms] = value\n\n return affirmation_map\n\n @staticmethod\n def __calculate_affirmation_rule_values(affirmation_map):\n matrix = list(affirmation_map.values())\n\n return [max(i) for i in rotate_matrix(matrix)]\n\n @staticmethod\n def __total_affirmation_value(values):\n total = 0\n max_key_value = len(values) - 1\n\n for key, value in enumerate(values):\n total += ((key / max_key_value) * value)\n\n return total / sum(values)\n","repo_name":"5x/fuzzy-logic-multi-criteria-decisions","sub_path":"fuzzy_logic/multi_criteria_affiliation.py","file_name":"multi_criteria_affiliation.py","file_ext":"py","file_size_in_byte":3930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"39393687341","text":"# -*- mode: python ; coding: utf-8 -*-\n# from PyInstaller.building.api import PYZ, EXE, COLLECT\n# from PyInstaller.building.build_main import Analysis\n\n\nDEBUG: bool = True\n\n\nanalysis = Analysis(\n\t[ 'src\\\\compiler.py' ],\n\tpathex=[ ],\n\tbinaries=[ ],\n\tdatas=[ ],\n\thiddenimports=[\n\t\t'ast_',\n\t\t'backend.interpreter',\n\t\t'backend.llvm',\n\t\t'backend.wasm',\n\t],\n\thookspath=[ ],\n\thooksconfig={},\n\truntime_hooks=[ ],\n\texcludes=[\n\t\t'asyncio',\n\t\t'bz2',\n\t\t'socket',\n\t\t'ssl',\n\t\t'lzma',\n\t],\n\twin_no_prefer_redirects=False,\n\twin_private_assemblies=False,\n\tnoarchive=False\n)\n\npyz = PYZ(\n\tanalysis.pure,\n\tanalysis.zipped_data,\n\tcipher=None\n)\n\nexe = EXE(\n\tpyz,\n\tanalysis.scripts,\n\t[ ],\n\texclude_binaries=True,\n\tname='endcc',\n\tdebug=DEBUG,\n\tbootloader_ignore_signals=False,\n\tstrip=not 
DEBUG,\n\tupx=not DEBUG,\n\tconsole=True,\n\tdisable_windowed_traceback=False,\n\ttarget_arch=None,\n\tcodesign_identity=None,\n\tentitlements_file=None\n)\n\ncoll = COLLECT(\n\texe,\n\tanalysis.binaries,\n\tanalysis.zipfiles,\n\tanalysis.datas,\n\tstrip=not DEBUG,\n\tupx=not DEBUG,\n\tupx_exclude=[ ],\n\tname='endcc'\n)\n","repo_name":"ENDERZOMBI102/endc-lang","sub_path":"compiler.spec","file_name":"compiler.spec","file_ext":"spec","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"49"} +{"seq_id":"27748868334","text":"# region Imports\n# external modules\nimport os\nfrom typing import Union\nimport zipfile\nfrom collections import defaultdict\nimport uuid\nimport json\n\n# blender modules\nimport bpy\nfrom bpy.types import Operator\nfrom bpy.props import StringProperty, BoolProperty, EnumProperty, CollectionProperty\nfrom bpy_extras.io_utils import ImportHelper, ExportHelper\n\n# relative imports\nfrom .. import functions, properties, icon_manager, ui_functions, keymap\nfrom . import shared\nfrom ..functions.shared import get_preferences\nfrom ..log import logger\nfrom ..keymap import keymaps\n# endregion\n\n\n# region Operators\n\n\nclass AR_OT_gloabal_recategorize_action(shared.Id_based, Operator):\n bl_idname = \"ar.global_recategorize_action\"\n bl_label = \"Recategoize Action Button\"\n bl_description = \"Reallocate the selected Action to another Category\"\n\n @classmethod\n def poll(cls, context: bpy.types.Context):\n ActRec_pref = get_preferences(context)\n return len(ActRec_pref.global_actions) and len(ActRec_pref.get(\"global_actions.selected_ids\", []))\n\n def invoke(self, context: bpy.types.Context, event: bpy.types.Event):\n return context.window_manager.invoke_props_dialog(self)\n\n def execute(self, context: bpy.types.Context):\n ActRec_pref = get_preferences(context)\n categories = ActRec_pref.categories\n ids = functions.get_global_action_ids(ActRec_pref, self.id, self.index)\n self.clear()\n if all(category.selected for category in categories):\n return {\"CANCELLED\"}\n for category in categories:\n if category.selected:\n for id in set(ids).difference(x.id for x in category.actions):\n new_action = category.actions.add()\n new_action.id = id\n else:\n for id in ids:\n category.actions.remove(category.actions.find(id))\n if ActRec_pref.autosave:\n functions.save(ActRec_pref)\n context.area.tag_redraw()\n return {\"FINISHED\"}\n\n def draw(self, context: bpy.types.Context):\n ActRec_pref = get_preferences(context)\n categories = ActRec_pref.categories\n layout = self.layout\n for category in categories:\n layout.prop(category, 'selected', text=category.label)\n\n\nclass AR_OT_global_import(Operator, ImportHelper):\n bl_idname = \"ar.global_import\"\n bl_label = \"Import\"\n bl_description = \"Import the Action file into the storage\"\n\n filter_glob: StringProperty(default='*.zip;*.json', options={'HIDDEN'})\n\n category: StringProperty(default=\"Imports\")\n mode: EnumProperty(\n name='Mode',\n items=[\n (\"add\", \"Add\", \"Add to the current Global data\"),\n (\"overwrite\", \"Overwrite\", \"Remove the current Global data\")\n ]\n )\n include_keymap: BoolProperty(\n name=\"Include Shortcuts\",\n description=\"Include the added Shortcuts of the actions\",\n default=True\n )\n\n def get_macros_from_file(self, context: bpy.types.Context, zip_file: zipfile.ZipFile, path: str) -> list:\n \"\"\"\n Extract macros from the path inside the given zip-file\n\n Args:\n context (bpy.types.Context): active 
blender context\n zip_file (zipfile.ZipFile): zip file to extract the macros from\n path (str): path to the file with macros inside the zip file\n\n Returns:\n list: extracted macros, macros are of type dict\n \"\"\"\n lines = zip_file.read(path).decode(encoding=\"utf-8\").splitlines()\n macros = []\n for line in lines:\n data = {'id': uuid.uuid1().hex, 'active': True, 'icon': 0}\n data['command'] = line\n label = functions.get_name_of_command(context, line)\n data['label'] = label if isinstance(label, str) else line\n macros.append(data)\n return macros\n\n def execute(self, context: bpy.types.Context):\n # REFACTOR indentation\n ActRec_pref = get_preferences(context)\n\n # Try to load import settings and check if file is valid\n if (not len(ActRec_pref.import_settings)\n and bpy.ops.ar.global_import_settings(\n 'EXEC_DEFAULT',\n filepath=self.filepath,\n from_operator=True) == {'CANCELLED'}):\n self.report({'ERROR'}, \"Selected file is incompatible\")\n return {'CANCELLED'}\n\n if ActRec_pref.import_extension == \".zip\" or ActRec_pref.import_extension == \".json\":\n\n if self.mode == \"overwrite\":\n for i in range(len(ActRec_pref.categories)):\n ui_functions.unregister_category(ActRec_pref, i)\n ActRec_pref.global_actions.clear()\n ActRec_pref.categories.clear()\n\n if ActRec_pref.import_extension == \".zip\":\n # Only used because old Version used .zip to export and directory and file structure\n # Categories where saved as directories and Actions where saved as files in the specific directory\n data = defaultdict(list)\n zip_file = zipfile.ZipFile(self.filepath, mode='r')\n for category in ActRec_pref.import_settings:\n if category.use and any(action.use for action in category.actions):\n actions = list(\n filter(lambda x: x.use, category.actions))\n category_actions = [\n {\n 'id': uuid.uuid1().hex,\n 'label': action.label,\n 'macros': self.get_macros_from_file(context, zip_file, action.identifier),\n 'icon': int(action.identifier.split(\"~\")[-1].split(\".\")[0])\n }for action in actions\n ]\n data['categories'].append({\n 'id': uuid.uuid1().hex,\n 'label': category.label,\n 'actions': [{\"id\": action['id']} for action in category_actions]\n })\n data['actions'] += category_actions\n functions.import_global_from_dict(ActRec_pref, data)\n elif ActRec_pref.import_extension == \".json\":\n with open(self.filepath, 'r', encoding='utf-8') as file:\n data = json.loads(file.read())\n category_ids = set(category.identifier for category in ActRec_pref.import_settings if category.use)\n action_ids = []\n for category in ActRec_pref.import_settings:\n action_ids += [action.identifier for action in category.actions if action.use]\n action_ids = set(action_ids)\n\n data['categories'] = [category for category in data['categories'] if category['id'] in category_ids]\n data['actions'] = [action for action in data['actions'] if action['id'] in action_ids]\n current_action_ids = set(action.id for action in ActRec_pref.global_actions)\n current_category_ids = set(action.id for action in ActRec_pref.categories)\n for action in data['actions']:\n if action['id'] not in current_action_ids:\n continue\n action['id'] = uuid.uuid1().hex\n for category in data['categories']:\n if category['id'] not in current_category_ids:\n continue\n category['id'] = uuid.uuid1().hex\n functions.import_global_from_dict(ActRec_pref, data)\n if self.include_keymap:\n default_km = keymaps.get('default')\n keymap.load_action_keymap_data(data, default_km.keymap_items)\n else:\n self.report({'ERROR'}, \"Select a .json or .zip 
file {%s}\" % self.filepath)\n ActRec_pref = get_preferences(context)\n ActRec_pref.import_settings.clear()\n if ActRec_pref.autosave:\n functions.save(ActRec_pref)\n context.area.tag_redraw()\n return {\"FINISHED\"}\n\n def draw(self, context: bpy.types.Context):\n ActRec_pref = get_preferences(context)\n layout = self.layout\n layout.operator(\n \"ar.global_import_settings\",\n text=\"Load import settings\"\n ).filepath = self.filepath\n col = layout.column(align=True)\n row = col.row(align=True)\n row.prop(self, 'mode', expand=True)\n col.prop(self, 'include_keymap')\n for category in ActRec_pref.import_settings:\n box = col.box()\n sub_col = box.column()\n row = sub_col.row()\n if category.show:\n row.prop(category, 'show', icon=\"TRIA_DOWN\", text=\"\", emboss=False)\n else:\n row.prop(category, 'show', icon=\"TRIA_RIGHT\", text=\"\", emboss=False)\n row.prop(category, 'use', text=\"\")\n row.label(text=category.label)\n if not category.show:\n continue\n sub_col = box.column()\n for action in category.actions:\n row = sub_col.row()\n row.prop(action, 'use', text=\"\")\n row.label(text=action.label)\n if not self.include_keymap:\n continue\n sub2_col = row.column()\n sub2_col.alignment = 'RIGHT'\n sub2_col.enabled = False\n sub2_col.label(text=action.shortcut)\n\n def cancel(self, context: bpy.types.Context):\n ActRec_pref = get_preferences(context)\n ActRec_pref.import_settings.clear()\n\n\nclass AR_OT_global_import_settings(Operator):\n bl_idname = \"ar.global_import_settings\"\n bl_label = \"Load import settings\"\n bl_description = \"Loads the select file to change the import settings\"\n\n filepath: StringProperty()\n from_operator: BoolProperty(default=False)\n\n def valid_file(self, file: str) -> bool:\n # Only used because old Version used .zip to export and directory and file structure\n # Categories where saved as directories and Actions where saved as files in the specific directory\n \"\"\"\n check if the given file is valid based on the string\n the file must match the pattern ~~.py\n\n Args:\n file (str): filename with extension\n\n Returns:\n bool: is valid\n \"\"\"\n if file.count('~') == 2:\n # remove .py from filename and split apart\n index, name, icon = \".\".join(file.split(\".\")[:-1]).split(\"~\")\n return index.isdigit() and (icon.isupper() or icon.isdigit())\n return False\n\n def valid_directory(self, directory: str) -> bool:\n # Only used because old Version used .zip to export and directory and file structure\n # Categories where saved as directories and Actions where saved as files in the specific directory\n \"\"\"\n check if the given directory is valid based on the string\n the directory must match the pattern ~\n\n Args:\n directory (str): directory, directory with path is not allowed e.g. 
my_path/my_dir\n\n Returns:\n bool: is valid\n \"\"\"\n if directory.count('~') == 1:\n index, name = directory.split('~')\n return index.isdigit()\n return False\n\n def import_sorted_zip(self, filepath: str) -> Union[dict, str]:\n # Only used because old Version used .zip to export and directory and file structure\n # Categories where saved as directories and Actions where saved as files in the specific directory\n \"\"\"\n sort the directories inside the zip based on the specific category pattern\n\n Args:\n filepath (str): path the zip file\n\n Returns:\n Union[dict, str]:\n Success (dict): category as key with list of files;\n Error (str): file that occurred with the error\n \"\"\"\n with zipfile.ZipFile(filepath, 'r') as zip_file:\n filepaths = sorted(zip_file.namelist())\n categories = defaultdict(list)\n\n for file in filter(lambda x: x.endswith(\".py\"), filepaths):\n split = file.split(\"/\")\n if len(split) < 2:\n return file\n category = split[-2]\n action_file = split[-1]\n if not (self.valid_directory(category) and self.valid_file(action_file)):\n return file\n categories[category].append(file)\n for item in categories.values():\n item.sort(key=lambda x: int(x.split(\"/\")[-1].split('~')[0]))\n return categories\n\n def map_shortcut_to_actions(self, context: bpy.types.Context, keymap_data: dict) -> dict:\n \"\"\"\n maps the keymap to the corresponding action id\n\n Args:\n keymap_data (str): JSON Format\n\n Returns:\n dict: key (str): id; value (str): shortcut\n \"\"\"\n km = context.window_manager.keyconfigs.addon.keymaps.new(name=\"AR_IMPORT_TEMP\")\n keymap.load_action_keymap_data(keymap_data, km.keymap_items)\n shortcut_map = {}\n for kmi in km.keymap_items:\n shortcut_map[kmi.properties.id] = kmi.to_string()\n context.window_manager.keyconfigs.addon.keymaps.remove(km)\n return shortcut_map\n\n def execute(self, context):\n ActRec_pref = get_preferences(context)\n ActRec_pref.import_settings.clear()\n\n # REFACTOR indentation\n if os.path.exists(self.filepath):\n if self.filepath.endswith(\".zip\"):\n # Only used because old Version used .zip to export and directory and file structure\n # Categories where saved as directories and Actions where saved as files in the specific directory\n ActRec_pref.import_extension = \".zip\"\n categories_paths = self.import_sorted_zip(self.filepath)\n if isinstance(categories_paths, str):\n if not self.from_operator:\n self.report(\n {'ERROR'}, \"The selected file is not compatible (%s)\" % categories_paths)\n return {'CANCELLED'}\n for key, item in sorted(categories_paths.items(), key=lambda x: int(x[0].split('~')[0])):\n new_category = ActRec_pref.import_settings.add()\n new_category.identifier = key\n new_category.label = key.split('~')[1]\n for file in item:\n new_action = new_category.actions.add()\n new_action.identifier = file\n new_action.label = file.split(\"/\")[-1].split('~')[1]\n return {\"FINISHED\"}\n elif self.filepath.endswith(\".json\"):\n ActRec_pref.import_extension = \".json\"\n try:\n with open(self.filepath, 'r', encoding='utf-8') as file:\n data = json.loads(file.read())\n actions = {action['id']: action for action in data['actions']}\n shortcut_map = self.map_shortcut_to_actions(context, data)\n for category in data['categories']:\n new_category = ActRec_pref.import_settings.add()\n new_category.identifier = category['id']\n new_category.label = category['label']\n for id in category['actions']:\n action = actions[id['id']]\n new_action = new_category.actions.add()\n new_action.identifier = action['id']\n 
new_action.label = action['label']\n new_action.shortcut = shortcut_map.get(action['id'], \"\")\n return {\"FINISHED\"}\n except Exception as err:\n logger.error(\"selected .json file not compatible (%s)\" % err)\n self.report({'ERROR'}, \"The selected file is not compatible (%s)\" % self.filepath)\n return {'CANCELLED'}\n\n if not self.from_operator:\n self.report({'ERROR'}, \"You need to select a .json or .zip file\")\n self.from_operator = False\n return {'CANCELLED'}\n\n\nclass AR_OT_global_export(Operator, ExportHelper):\n bl_idname = \"ar.global_export\"\n bl_label = \"Export\"\n bl_description = \"Export the Action file as a .json file\"\n\n def get_export_all(self) -> bool:\n \"\"\"\n default getter for export all\n\n Returns:\n bool: state of export all\n \"\"\"\n return self.get(\"export_all\", False)\n\n def set_export_all(self, value: bool):\n \"\"\"\n setter for export all\n transfer the value to all categories and actions\n\n Args:\n value (bool): state of export all\n \"\"\"\n self[\"export_all\"] = value\n for category in self.export_categories:\n category[\"export_all\"] = value\n for action in category.actions:\n action[\"export_all\"] = value\n\n filter_glob: StringProperty(default='*.json', options={'HIDDEN'})\n filename_ext = \".json\"\n\n filepath: StringProperty(\n name=\"File Path\",\n description=\"Filepath used for exporting the file\",\n maxlen=1024,\n subtype='FILE_PATH',\n default=\"ActionRecorderButtons\"\n )\n\n export_all: BoolProperty(\n name=\"All\",\n description=\"Export all category\",\n get=get_export_all,\n set=set_export_all\n )\n export_categories: CollectionProperty(type=properties.AR_global_export_categories)\n\n include_keymap: BoolProperty(\n name=\"Include Shortcuts\",\n description=\"Include the added Shortcuts of the actions\",\n default=True\n )\n\n def append_keymap(self, data, export_action_ids):\n default_km = keymaps.get('default')\n for kmi in default_km.keymap_items:\n if kmi.idname == \"ar.global_execute_action\" and kmi.properties['id'] in export_action_ids:\n data['keymap'].append({\n 'id': kmi.properties['id'],\n 'active': kmi.active,\n 'type': kmi.type,\n 'value': kmi.value,\n 'any': kmi.any,\n 'shift': kmi.shift,\n 'ctrl': kmi.ctrl,\n 'alt': kmi.alt,\n 'oskey': kmi.oskey,\n 'key_modifier': kmi.key_modifier,\n 'repeat': kmi.repeat,\n 'map_type': kmi.map_type\n })\n\n @classmethod\n def poll(cls, context: bpy.types.Context):\n ActRec_pref = get_preferences(context)\n return len(ActRec_pref.global_actions)\n\n def invoke(self, context: bpy.types.Context, event: bpy.types.Event):\n # Make copy of categories and actions ot export_categories and export_actions\n ActRec_pref = get_preferences(context)\n for category in ActRec_pref.categories:\n new_category = self.export_categories.add()\n new_category.id = category.id\n new_category.label = category.label\n for id_action in category.actions:\n action = ActRec_pref.global_actions.get(id_action.id, None)\n if action is None:\n category.actions.remove(category.actions.find(id_action.id))\n continue\n new_action = new_category.actions.add()\n new_action.id = action.id\n new_action.label = action.label\n action_keymap = functions.get_action_keymap(action.id)\n if action_keymap:\n new_action.shortcut = action_keymap.to_string()\n return ExportHelper.invoke(self, context, event)\n\n def execute(self, context: bpy.types.Context):\n ActRec_pref = get_preferences(context)\n if not os.path.exists(os.path.dirname(self.filepath)):\n self.report({'ERROR', \"Directory doesn't exist\"})\n return 
{'CANCELLED'}\n if not self.filepath.endswith(\".json\"):\n self.report({'ERROR', \"File has to be a json file\"})\n return {'CANCELLED'}\n data = defaultdict(list)\n export_category_ids = set(\n category.id for category in self.export_categories if category.use\n )\n export_action_ids = []\n for category in self.export_categories:\n export_action_ids += set(\n action.id for action in category.actions if action.use\n )\n for category in ActRec_pref.categories:\n if category.id in export_category_ids:\n data['categories'].append(functions.property_to_python(\n category,\n exclude=[\"name\", \"selected\", \"actions.name\", \"areas.name\", \"areas.modes.name\"]\n ))\n for action in ActRec_pref.global_actions:\n if action.id in export_action_ids:\n data['actions'].append(functions.property_to_python(\n action,\n exclude=[\"name\", \"selected\", \"alert\", \"macros.name\", \"macros.is_available\", \"macros.alert\"]\n ))\n\n if self.include_keymap:\n self.append_keymap(data, export_action_ids)\n\n with open(self.filepath, 'w', encoding='utf-8') as file:\n json.dump(data, file, ensure_ascii=False, indent=2)\n self.cancel(context)\n return {'FINISHED'}\n\n def cancel(self, context: bpy.types.Context):\n self.export_categories.clear()\n self.all_categories = True\n\n def draw(self, context: bpy.types.Context):\n layout = self.layout\n layout.prop(self, 'include_keymap')\n layout.prop(self, 'export_all', text=\"All\")\n col = layout.column(align=True)\n for category in self.export_categories:\n box = col.box()\n col2 = box.column()\n row = col2.row()\n row.prop(\n category, 'show', icon=\"TRIA_DOWN\" if category.show else \"TRIA_RIGHT\", text=\"\", emboss=False)\n row.label(text=category.label)\n row.prop(category, 'use', text=\"\")\n\n if not category.show:\n continue\n col2 = box.column(align=False)\n for action in category.actions:\n sub_row = col2.row()\n sub_row.prop(action, 'use', text='')\n sub_row.label(text=action.label)\n if self.include_keymap:\n sub_col = sub_row.column()\n sub_col.alignment = 'RIGHT'\n sub_col.enabled = False\n sub_col.label(text=action.shortcut)\n\n\nclass AR_OT_global_save(Operator):\n bl_idname = \"ar.global_save\"\n bl_label = \"Save\"\n bl_description = \"Save all Global Actions to the Storage\"\n\n def execute(self, context: bpy.types.Context):\n functions.save(get_preferences(context))\n return {\"FINISHED\"}\n\n\nclass AR_OT_global_load(Operator):\n bl_idname = \"ar.global_load\"\n bl_label = \"Load\"\n bl_description = \"Load all Actions from the Storage\"\n\n def execute(self, context: bpy.types.Context):\n ActRec_pref = get_preferences(context)\n functions.load(ActRec_pref)\n context.area.tag_redraw()\n return {\"FINISHED\"}\n\n\nclass AR_OT_global_to_local(shared.Id_based, Operator):\n bl_idname = \"ar.global_to_local\"\n bl_label = \"Global Action to Local\"\n bl_description = \"Transfer the selected Action to Local-actions\"\n bl_options = {'UNDO'}\n\n @ classmethod\n def poll(cls, context: bpy.types.Context):\n ActRec_pref = get_preferences(context)\n return len(ActRec_pref.global_actions) and len(ActRec_pref.get(\"global_actions.selected_ids\", []))\n\n def global_to_local(self, ActRec_pref: bpy.types.AddonPreferences, action: 'AR_global_actions'):\n \"\"\"\n copy the given global action to a local action\n\n Args:\n ActRec_pref (bpy.types.AddonPreferences): preferences of this addon\n action (AR_global_actions): action to copy\n \"\"\"\n id = uuid.uuid1().hex if action.id in set(x.id for x in ActRec_pref.local_actions) else action.id\n data = 
functions.property_to_python(\n action,\n exclude=[\"name\", \"alert\", \"macros.name\", \"macros.alert\", \"macros.is_available\"]\n )\n data[\"id\"] = id\n functions.add_data_to_collection(ActRec_pref.local_actions, data)\n ActRec_pref.active_local_action_index = len(ActRec_pref.local_actions)\n\n def execute(self, context: bpy.types.Context):\n ActRec_pref = get_preferences(context)\n for id in functions.get_global_action_ids(ActRec_pref, self.id, self.index):\n self.global_to_local(ActRec_pref, ActRec_pref.global_actions[id])\n # REFACTOR indentation ?\n if ActRec_pref.global_to_local_mode == 'move':\n ActRec_pref.global_actions.remove(ActRec_pref.global_actions.find(id))\n for category in ActRec_pref.categories:\n category.actions.remove(category.actions.find(id))\n functions.save_local_to_scene(ActRec_pref, context.scene)\n context.area.tag_redraw()\n self.clear()\n return {\"FINISHED\"}\n\n\nclass AR_OT_global_remove(shared.Id_based, Operator):\n bl_idname = \"ar.global_remove\"\n bl_label = \"Remove Action\"\n bl_description = \"Remove the selected actions\"\n\n @classmethod\n def description(cls, context, properties):\n ActRec_pref = get_preferences(context)\n ids = ActRec_pref.get(\"global_actions.selected_ids\", [])\n selected_actions_str = \", \".join(ActRec_pref.global_actions[id].label for id in ids)\n return \"Remove the selected actions\\nActions: %s\" % (selected_actions_str)\n\n @classmethod\n def poll(cls, context: bpy.types.Context):\n ActRec_pref = get_preferences(context)\n return len(ActRec_pref.global_actions) and len(ActRec_pref.get(\"global_actions.selected_ids\", []))\n\n def execute(self, context: bpy.types.Context):\n ActRec_pref = get_preferences(context)\n for id in functions.get_global_action_ids(ActRec_pref, self.id, self.index):\n if functions.get_action_keymap(id) is not None:\n functions.remove_action_keymap(id)\n ActRec_pref.global_actions.remove(ActRec_pref.global_actions.find(id))\n for category in ActRec_pref.categories:\n category.actions.remove(category.actions.find(id))\n context.area.tag_redraw()\n self.clear()\n return {\"FINISHED\"}\n\n def invoke(self, context: bpy.types.Context, event: bpy.types.Event):\n return context.window_manager.invoke_confirm(self, event)\n\n\nclass AR_OT_global_move_up(shared.Id_based, Operator):\n bl_idname = \"ar.global_move_up\"\n bl_label = \"Move Action Up\"\n bl_description = \"Move the selected actions Up\"\n\n @classmethod\n def poll(cls, context: bpy.types.Context):\n ActRec_pref = get_preferences(context)\n return len(ActRec_pref.global_actions) and len(ActRec_pref.get(\"global_actions.selected_ids\", []))\n\n def execute(self, context: bpy.types.Context):\n ActRec_pref = get_preferences(context)\n ids = set(functions.get_global_action_ids(ActRec_pref, self.id, self.index))\n for category in ActRec_pref.categories:\n for id_action in category.actions:\n # REFACTOR indentation\n if id_action.id in ids:\n index = category.actions.find(id_action.id)\n category.actions.move(index, index - 1)\n context.area.tag_redraw()\n self.clear()\n return {\"FINISHED\"}\n\n\nclass AR_OT_global_move_down(shared.Id_based, Operator):\n bl_idname = \"ar.global_move_down\"\n bl_label = \"Move Action Down\"\n bl_description = \"Move the selected actions Down\"\n\n @classmethod\n def poll(cls, context):\n ActRec_pref = get_preferences(context)\n return len(ActRec_pref.global_actions) and len(ActRec_pref.get(\"global_actions.selected_ids\", []))\n\n def execute(self, context: bpy.types.Context):\n ActRec_pref = 
get_preferences(context)\n ids = set(functions.get_global_action_ids(ActRec_pref, self.id, self.index))\n for category in ActRec_pref.categories:\n for id_action in reversed(list(category.actions)):\n # REFACTOR indentation\n if id_action.id in ids:\n index = category.actions.find(id_action.id)\n category.actions.move(index, index + 1)\n context.area.tag_redraw()\n self.clear()\n return {\"FINISHED\"}\n\n\nclass AR_OT_global_rename(shared.Id_based, Operator):\n bl_idname = \"ar.global_rename\"\n bl_label = \"Rename Button\"\n bl_description = \"Rename the selected Button\"\n\n label: StringProperty()\n\n @classmethod\n def poll(cls, context: bpy.types.Context):\n ActRec_pref = get_preferences(context)\n return len(ActRec_pref.global_actions) and len(ActRec_pref.get(\"global_actions.selected_ids\", [])) == 1\n\n def execute(self, context: bpy.types.Context):\n ActRec_pref = get_preferences(context)\n ids = functions.get_global_action_ids(ActRec_pref, self.id, self.index)\n self.clear()\n label = self.label\n self.label = \"\"\n\n # REFACTOR indentation\n if len(ids) == 1:\n id = ids[0]\n action = ActRec_pref.global_actions.get(id, None)\n if action:\n ActRec_pref.global_actions[id].label = label\n if ActRec_pref.autosave:\n functions.save(ActRec_pref)\n context.area.tag_redraw()\n return {\"FINISHED\"}\n return {'CANCELLED'}\n\n\nclass AR_OT_global_execute_action(shared.Id_based, Operator):\n bl_idname = 'ar.global_execute_action'\n bl_label = 'ActRec Action Button'\n bl_description = 'Play this Action Button'\n bl_options = {'UNDO', 'INTERNAL'}\n\n @classmethod\n def description(cls, context: bpy.types.Context, properties):\n ActRec_pref = functions.get_preferences(context)\n id = functions.get_global_action_id(ActRec_pref, properties.id, properties.index)\n action = ActRec_pref.global_actions[id]\n return action.description\n\n def execute(self, context: bpy.types.Context):\n ActRec_pref = get_preferences(context)\n id = functions.get_global_action_id(ActRec_pref, self.id, self.index)\n self.clear()\n if id is None:\n return {'CANCELLED'}\n action = ActRec_pref.global_actions[id]\n err = functions.play(context, action.macros, action, 'global_actions')\n if err:\n self.report({'ERROR'}, str(err))\n return {'FINISHED'}\n\n\nclass AR_OT_global_icon(icon_manager.Icontable, shared.Id_based, Operator):\n bl_idname = \"ar.global_icon\"\n\n def invoke(self, context: bpy.types.Context, event: bpy.types.Event):\n ActRec_pref = get_preferences(context)\n id = functions.get_global_action_id(ActRec_pref, self.id, self.index)\n if id is None:\n self.clear()\n return {'CANCELLED'}\n self.id = id\n if not self.reuse:\n ActRec_pref.selected_icon = ActRec_pref.global_actions[id].icon\n self.search = ''\n return context.window_manager.invoke_props_dialog(self, width=1000)\n\n def execute(self, context: bpy.types.Context):\n ActRec_pref = get_preferences(context)\n ActRec_pref.global_actions[self.id].icon = ActRec_pref.selected_icon\n ActRec_pref.selected_icon = 0 # Icon: NONE\n self.reuse = False\n if ActRec_pref.autosave:\n functions.save(ActRec_pref)\n bpy.context.area.tag_redraw()\n self.clear()\n return {\"FINISHED\"}\n\n\nclass AR_OT_add_ar_shortcut(Operator):\n bl_idname = \"ar.add_ar_shortcut\"\n bl_label = \"Add Shortcut\"\n bl_options = {'INTERNAL'}\n bl_description = \"Add a Shortcut to the selected Action\"\n\n id: StringProperty()\n\n def draw(self, context: bpy.types.Context):\n # REFACTOR indentation\n self.layout.label(text=self.bl_label)\n for kmi in keymap.keymaps['default'].keymap_items:\n 
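# look up the keymap item previously bound to this action id so its\n # key binding can be edited in place via the popup\n 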
if kmi.idname == \"ar.global_execute_action\" and kmi.properties.id == self.id:\n self.layout.prop(kmi, \"type\", text=\"\", full_event=True)\n kmi.active = True\n break\n\n def invoke(self, context: bpy.types.Context, event: bpy.types.Event):\n if self.id and functions.get_action_keymap(self.id) is None:\n functions.add_empty_action_keymap(self.id)\n return context.window_manager.invoke_popup(self)\n\n def execute(self, context: bpy.types.Context):\n return {\"FINISHED\"}\n\n def cancel(self, context: bpy.types.Context):\n # Use cancel as execution of changed keymap (not intended use of invoke_popup)\n ActRec_pref = get_preferences(context)\n kmi = functions.get_action_keymap(self.id)\n if self.id == '' or functions.is_action_keymap_empty(kmi):\n functions.remove_action_keymap(self.id)\n return\n if ActRec_pref.autosave:\n functions.save(ActRec_pref)\n\n\nclass AR_OT_remove_ar_shortcut(Operator):\n bl_idname = \"ar.remove_ar_shortcut\"\n bl_label = \"Remove Shortcut\"\n bl_options = {'INTERNAL'}\n bl_description = \"Remove the Shortcut from the selected Action\"\n\n id: StringProperty()\n\n def execute(self, context: bpy.types.Context):\n ActRec_pref = get_preferences(context)\n if functions.get_action_keymap(self.id) is None:\n return {\"CANCELLED\"}\n functions.remove_action_keymap(self.id)\n if ActRec_pref.autosave:\n functions.save(ActRec_pref)\n return {\"FINISHED\"}\n\n\nclass AR_OT_global_edit_description(Operator):\n bl_idname = \"ar.global_edit_description\"\n bl_label = \"Edit Description\"\n bl_options = {'INTERNAL'}\n bl_description = \"Edit the description of this Action\"\n\n id: StringProperty()\n description: StringProperty(\n name=\"Description\",\n description=\"Sets the description that gets shown when hovered over this action\"\n )\n action_label: StringProperty()\n\n def draw(self, context: bpy.types.Context):\n layout = self.layout\n layout.label(text=\"Action: %s\" % self.action_label)\n layout.prop(self, 'description')\n\n def invoke(self, context: bpy.types.Context, event: bpy.types.Event):\n ActRec_pref = functions.get_preferences(context)\n id = functions.get_global_action_id(ActRec_pref, self.id, -1)\n action = ActRec_pref.global_actions[id]\n self.action_label = action.label\n self.description = action.description\n return context.window_manager.invoke_props_dialog(self)\n\n def execute(self, context: bpy.types.Context):\n ActRec_pref = functions.get_preferences(context)\n id = functions.get_global_action_id(ActRec_pref, self.id, -1)\n ActRec_pref.global_actions[id].description = self.description\n if ActRec_pref.autosave:\n functions.save(ActRec_pref)\n return {'FINISHED'}\n\n# endregion\n\n\nclasses = [\n AR_OT_gloabal_recategorize_action,\n AR_OT_global_import,\n AR_OT_global_import_settings,\n AR_OT_global_export,\n AR_OT_global_save,\n AR_OT_global_load,\n AR_OT_global_to_local,\n AR_OT_global_remove,\n AR_OT_global_move_up,\n AR_OT_global_move_down,\n AR_OT_global_rename,\n AR_OT_global_execute_action,\n AR_OT_global_icon,\n AR_OT_add_ar_shortcut,\n AR_OT_remove_ar_shortcut,\n AR_OT_global_edit_description\n]\n\n# region Registration\n\n\ndef register():\n for cls in classes:\n bpy.utils.register_class(cls)\n\n\ndef unregister():\n for cls in classes:\n bpy.utils.unregister_class(cls)\n# endregion\n","repo_name":"InamuraJIN/ActionRecorder","sub_path":"ActRec/actrec/operators/globals.py","file_name":"globals.py","file_ext":"py","file_size_in_byte":36058,"program_lang":"python","lang":"en","doc_type":"code","stars":158,"dataset":"github-code","pt":"49"} 
+{"seq_id":"19193888314","text":"import pickle\nfrom collections import Counter\n\nfrom flask import Flask, request, jsonify\nfrom gensim.models import Word2Vec\n\nimport config\nimport cosine_sim\nimport tokenizer\nimport top_files\nimport math\nfrom inverted_index_gcp import InvertedIndex\n\n\nclass MyFlaskApp(Flask):\n def run(self, host=None, port=None, debug=None, **options):\n super(MyFlaskApp, self).run(host=host, port=port, debug=debug, **options)\n\n\napp = MyFlaskApp(__name__)\napp.config['JSONIFY_PRETTYPRINT_REGULAR'] = False\n\nbody_index = InvertedIndex().read_index(config.path_to_body_index, \"index\")\ntitle_index = InvertedIndex().read_index(config.path_to_title_index, \"index\")\nanchor_index = InvertedIndex().read_index(config.path_to_anchor_index, \"index\")\n# body_stemming_index = InvertedIndex().read_index(config.path_to_body_stemming_index, \"index\")\n\nwith open(\"pkl/id_to_title.pkl\", 'rb') as f:\n id_to_title = pickle.load(f)\n\nwith open(\"pkl/page_rank.pkl\", 'rb') as f:\n page_rank = pickle.load(f)\n\nwith open(\"pkl/page_views.pkl\", 'rb') as f:\n page_views = pickle.load(f)\n\nwith open(\"pkl/DL_body.pkl\", 'rb') as f:\n body_index.DL = pickle.load(f)\n\nwith open(\"pkl/DL_title.pkl\", 'rb') as f:\n title_index.DL = pickle.load(f)\n\nword2vec = Word2Vec.load(\"word2vec111_chank_49000.model\")\n\n\ndef title_from_id_list(lst):\n return list(map(lambda x: (x[0], id_to_title[int(x[0])]), lst))\n\n\ndef calculate_cosin_sim(query, index_to_sim, path, stemming=True):\n tokenized_query = tokenizer.tokenize(query)\n if stemming:\n tokenized_query = tokenizer.stemmeing(tokenized_query)\n return cosine_sim.temp(tokenized_query, index_to_sim, path)\n\n\ndef matching_terms(tokenized_query, index_to_match, path):\n dic = {}\n for token in set(tokenized_query):\n if index_to_match.df.get(token):\n for doc, tf in index_to_match.read_posting_list(path, token):\n dic[doc] = dic.get(doc, 0) + 1\n return dic\n\n\n@app.route(\"/search\")\ndef search():\n \"\"\" Returns up to a 100 of your best search results for the query. This is\n the place to put forward your best search engine, and you are free to\n implement the retrieval whoever you'd like within the bound of the\n project requirements (efficiency, quality, etc.). 
That means it is up to\n you to decide on whether to use stemming, remove stopwords, use\n PageRank, query expansion, etc.\n\n To issue a query navigate to a URL like:\n http://YOUR_SERVER_DOMAIN/search?query=hello+world\n where YOUR_SERVER_DOMAIN is something like XXXX-XX-XX-XX-XX.ngrok.io\n if you're using ngrok on Colab or your external IP on GCP.\n Returns:\n --------\n list of up to 100 search results, ordered from best to worst where each\n element is a tuple (wiki_id, title).\n \"\"\"\n res = []\n query = request.args.get('query', '')\n if len(query) == 0:\n return jsonify(res)\n tokenized_query = tokenizer.tokenize(query)\n\n body_result = Counter(matching_terms(tokenized_query, body_index, config.path_to_body_index)).most_common()\n title_result = matching_terms(tokenized_query, title_index, config.path_to_title_index)\n more_to_add = 300\n body_len = len(body_result)\n\n for doc_id, score in body_result:\n title_score = title_result.get(doc_id)\n body_len -= 1\n if title_score:\n more_to_add -= 1\n res.append(\n (doc_id, score + title_score, page_rank.get(doc_id, 0), page_views.get(doc_id, 0)))\n elif body_len < more_to_add:\n res.append((doc_id, score, page_rank.get(doc_id, 0), page_views.get(doc_id, 0)))\n if more_to_add == 0:\n break\n\n # print(len(res))\n res = sorted(res, key=lambda x: (-x[1], -x[3], -x[2]))[:100]\n\n res = title_from_id_list(res)\n # if len(res) < 100:\n # res = list(res+title_from_id_list(list(body_result)))[:100]\n\n return jsonify(res)\n\n\n@app.route(\"/search_body\")\ndef search_body():\n \"\"\" Returns up to a 100 search results for the query using TFIDF AND COSINE\n SIMILARITY OF THE BODY OF ARTICLES ONLY. DO NOT use stemming. DO USE the\n staff-provided tokenizer from Assignment 3 (GCP part) to do the\n tokenization and remove stopwords.\n\n To issue a query navigate to a URL like:\n http://YOUR_SERVER_DOMAIN/search_body?query=hello+world\n where YOUR_SERVER_DOMAIN is something like XXXX-XX-XX-XX-XX.ngrok.io\n if you're using ngrok on Colab or your external IP on GCP.\n Returns:\n --------\n list of up to 100 search results, ordered from best to worst where each\n element is a tuple (wiki_id, title).\n \"\"\"\n res = []\n query = request.args.get('query', '')\n if len(query) == 0:\n return jsonify(res)\n # BEGIN SOLUTION\n body_cosine = calculate_cosin_sim(query, body_index, config.path_to_body_index, False)\n res = top_files.get_top_n(body_cosine, id_to_title, 100)\n\n # END SOLUTION\n return jsonify(res)\n\n\n@app.route(\"/search_title\")\ndef search_title():\n \"\"\" Returns ALL (not just top 100) search results that contain A QUERY WORD\n IN THE TITLE of articles, ordered in descending order of the NUMBER OF\n DISTINCT QUERY WORDS that appear in the title. DO NOT use stemming. DO\n USE the staff-provided tokenizer from Assignment 3 (GCP part) to do the\n tokenization and remove stopwords. 
For example, a document\n        with a title that matches two distinct query words will be ranked before a\n        document with a title that matches only one distinct query word,\n        regardless of the number of times the term appeared in the title (or\n        query).\n\n        Test this by navigating to a URL like:\n         http://YOUR_SERVER_DOMAIN/search_title?query=hello+world\n        where YOUR_SERVER_DOMAIN is something like XXXX-XX-XX-XX-XX.ngrok.io\n        if you're using ngrok on Colab or your external IP on GCP.\n    Returns:\n    --------\n        list of ALL (not just top 100) search results, ordered from best to\n        worst where each element is a tuple (wiki_id, title).\n    \"\"\"\n    res = []\n    query = request.args.get('query', '')\n    if len(query) == 0:\n        return jsonify(res)\n    tokenized_query = tokenizer.tokenize(query)\n    res = Counter(matching_terms(tokenized_query, title_index, config.path_to_title_index)).most_common()\n    res = title_from_id_list(res)\n\n    return jsonify(res)\n\n\n@app.route(\"/search_anchor\")\ndef search_anchor():\n    \"\"\" Returns ALL (not just top 100) search results that contain A QUERY WORD\n        IN THE ANCHOR TEXT of articles, ordered in descending order of the\n        NUMBER OF QUERY WORDS that appear in anchor text linking to the page.\n        DO NOT use stemming. DO USE the staff-provided tokenizer from Assignment\n        3 (GCP part) to do the tokenization and remove stopwords. For example,\n        a document with an anchor text that matches two distinct query words will\n        be ranked before a document with anchor text that matches only one\n        distinct query word, regardless of the number of times the term appeared\n        in the anchor text (or query).\n\n        Test this by navigating to a URL like:\n         http://YOUR_SERVER_DOMAIN/search_anchor?query=hello+world\n        where YOUR_SERVER_DOMAIN is something like XXXX-XX-XX-XX-XX.ngrok.io\n        if you're using ngrok on Colab or your external IP on GCP.\n    Returns:\n    --------\n        list of ALL (not just top 100) search results, ordered from best to\n        worst where each element is a tuple (wiki_id, title).\n    \"\"\"\n    res = []\n    query = request.args.get('query', '')\n    if len(query) == 0:\n        return jsonify(res)\n    tokenized_query = tokenizer.tokenize(query)\n    res = Counter(matching_terms(tokenized_query, anchor_index, config.path_to_anchor_index)).most_common()\n    res = title_from_id_list(res)\n\n    return jsonify(res)\n\n\n@app.route(\"/get_pagerank\", methods=['POST'])\ndef get_pagerank():\n    \"\"\" Returns PageRank values for a list of provided wiki article IDs.\n\n        Test this by issuing a POST request to a URL like:\n          http://YOUR_SERVER_DOMAIN/get_pagerank\n        with a json payload of the list of article ids. In python do:\n          import requests\n          requests.post('http://YOUR_SERVER_DOMAIN/get_pagerank', json=[1,5,8])\n        As before YOUR_SERVER_DOMAIN is something like XXXX-XX-XX-XX-XX.ngrok.io\n        if you're using ngrok on Colab or your external IP on GCP.\n    Returns:\n    --------\n        list of floats:\n          list of PageRank scores that correspond to the provided article IDs.\n    \"\"\"\n    res = []\n    wiki_ids = request.get_json()\n    if len(wiki_ids) == 0:\n        return jsonify(res)\n\n    for doc_id in wiki_ids:\n        res.append(page_rank.get(doc_id, 0))\n\n    return jsonify(res)\n\n\n@app.route(\"/get_pageview\", methods=['POST'])\ndef get_pageview():\n    \"\"\" Returns the number of page views that each of the provided wiki articles\n        had in August 2021.\n\n        Test this by issuing a POST request to a URL like:\n          http://YOUR_SERVER_DOMAIN/get_pageview\n        with a json payload of the list of article ids. 
In python do:\n          import requests\n          requests.post('http://YOUR_SERVER_DOMAIN/get_pageview', json=[1,5,8])\n        As before YOUR_SERVER_DOMAIN is something like XXXX-XX-XX-XX-XX.ngrok.io\n        if you're using ngrok on Colab or your external IP on GCP.\n    Returns:\n    --------\n        list of ints:\n          list of page view numbers from August 2021 that correspond to the\n          provided list of article IDs.\n    \"\"\"\n    res = []\n    wiki_ids = request.get_json()\n    if len(wiki_ids) == 0:\n        return jsonify(res)\n\n    for doc_id in wiki_ids:\n        res.append(page_views.get(doc_id, 0))\n\n    return jsonify(res)\n\n\nif __name__ == '__main__':\n    # run the Flask RESTful API, make the server publicly available (host='0.0.0.0') on port 8080\n    app.run(host='0.0.0.0', port=8080, debug=False)\n","repo_name":"omeraflalo/information-retrieval-project","sub_path":"search_frontend.py","file_name":"search_frontend.py","file_ext":"py","file_size_in_byte":9977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"12487629541","text":"from threading import Thread\nfrom functools import wraps\nimport time\n\nfrom car.car import Car\nfrom storage.storage_scan import StorageScan\nfrom storage.storage_object import StorageObject\n\nclass StorageService:\n    def __init__(self, publish_distances_func, publish_location_func):\n        self.publish_distances_func = publish_distances_func\n        self.publish_location_func = publish_location_func\n        self.scan_interval = 0.25\n        # self.car = Car(6, 13, 19, 26, 21, 20, 16, 12,\n        #                14, 15, 18, 17, 2, 3)\n        self.car = Car(17, 18, 22, 23, 5, 6, 12, 13,\n                       2, 3, 14, 15, 24, 25)\n\n        self.car_running = False\n\n    def _return_back_car(self, start_time, end_time):\n        print('Returning the car ...')\n        duration = end_time - start_time\n        remaining_duration = end_time - start_time\n        step = 0.2\n\n        self.car.stop()\n        time.sleep(step)\n\n        self.car.move_backward()\n        while remaining_duration > 0:\n            remaining_duration = remaining_duration - step\n            location = remaining_duration / duration\n            print(\"Location: \" + str(location))\n            self.publish_location_func(location)\n            time.sleep(step)\n\n        self.car.stop()\n\n    def _scan_storage_worker(self):\n        storage_scan = StorageScan(210, 50, self.scan_interval)\n        start_time = time.time()\n\n        while self.car_running:\n            dists = self.car.read_distances()\n            emergency_stop_front = dists[3]\n            storage_scan.add_dists((dists[0], dists[1], dists[2]))\n\n            self.publish_distances_func(dists[0], dists[1], dists[2])\n\n            # print(\"Front = %.2f cm, side = %.2f cm, back %.2f cm, emergency stop: %r\" % dists)\n            if emergency_stop_front:\n                self.car.stop()\n                break\n\n            time.sleep(self.scan_interval)\n\n        end_time = time.time()\n\n        objects = storage_scan.collect_objects()\n        print('Number of scans: ' + str(storage_scan.no_scan_dists()) + '; number of objects: ' + str(len(objects)))\n        for obj in objects:\n            print('Start index: ' + str(obj.start_index) + '; end index: ' + str(obj.end_index))\n\n        self._return_back_car(start_time, end_time)\n\n\n\n    def scan_storage(self):\n        dists = self.car.read_distances()\n        emergency_stop_front = dists[3]\n\n        if emergency_stop_front:\n            print('Too close to objects. 
Car not moving forward!')\n return\n\n self.car_running = True\n self.car.move_forward()\n\n #eventlet.spawn(self.read_dists_worker)\n self._scan_storage_worker()\n\n def move_car_bwd(self):\n self.car.move_backward()\n\n def stop_car(self):\n self.car_running = False\n self.car.stop()","repo_name":"AlexandraAndritoi/Liga-AC-Labs-2018-Visma","sub_path":"storage_service.py","file_name":"storage_service.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"34734795641","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n\r\n@author: Boris Nedyalkov\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport Qf_extractor_functions as qf\r\n\r\ndata_set = np.loadtxt(\"localsweep_20221201_134354.sweep\",dtype=None)\r\n\r\nMaster_array = qf.data_massager(data_set) # this function takes the raw I and Q data and gives amliptude and phase\r\n\r\nm = 19 # the name (number) of the resonator you want ##in this sample there are only 60 resonators, the rest is blank\r\nfreq = Master_array [m,0,:]\r\namp = Master_array [m,1,:] \r\n \r\nQ_i, Qe, Q_tot, f_0, df, phi, e = qf.analysis_function (freq, amp) ## this function gives you the parameters of the resonator chosen\r\n\r\n###########################################################\r\n\r\nRe, Im = qf.Re_and_Im(freq, f_0, Q_tot, Qe, phi, e) ## get the Real and Imaginary parts of the S21 data\r\n\r\n############################################################\r\n\r\nfig1, (ax1) = plt.subplots(1, 1, figsize = (5, 5), dpi = 100)\r\nax1.plot(Re, Im, '-', color='#d63c49',linewidth=1.5)\r\n\r\nplt.show()\r\n\r\n###########################################################\r\n","repo_name":"Gandalfdore/Resonator_extractor","sub_path":"Q-f-extractor.py","file_name":"Q-f-extractor.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"13748410943","text":"# -*- coding: utf-8 -*-\n'''Functions for io file handling.\n Creating, writing, deleting, downloading, unzipping a file'''\n__author__ = 'Mark Zwaving'\n__email__ = 'markzwaving@gmail.com'\n__copyright__ = 'Copyright (C) Mark Zwaving. 
All rights reserved.'\n__license__ = 'GNU General Public License version 3 - GPLv3'\n__version__ = '0.2.3'\n__maintainer__ = 'Mark Zwaving'\n__status__ = 'Development'\n\nimport config as cfg\nimport sources.model.convert as convert\nimport sources.model.ymd as ymd\nimport sources.model.validate as validate\nimport sources.view.console as cnsl\nimport threading, urllib, json, os, time, zipfile, requests\nimport shutil, urllib.request, socket\nfrom urllib.parse import urlparse\n\ndownload_interval_time_min = 0.2\n\nabspath = lambda path: os.path.abspath(path)\nmk_path = lambda dir, f: abspath(os.path.join(dir, f))\n\ndef check(path, verbose=cfg.verbose):\n '''Function checks a file for existence'''\n ok = False\n cnsl.log(f'[{ymd.now()}] Check if a file exists', verbose)\n cnsl.log(f'File {path}', verbose)\n with threading.Lock():\n try:\n if os.path.exists(path): # Check if is there file\n ok = True\n except Exception as e:\n cnsl.log(f'Error check\\n{e}', cfg.error)\n else:\n if ok:\n cnsl.log('File exists', verbose)\n else:\n cnsl.log('File does not exist', verbose)\n return ok\n\ndef write(path='dummy.txt', content=cfg.e, encoding='utf-8', prefix='w', verbose=cfg.verbose):\n '''Function writes content to a file'''\n ok = False\n cnsl.log(f'[{ymd.now()}] write a file', verbose)\n cnsl.log(f'File {path}', verbose)\n with threading.Lock():\n try:\n map = os.path.dirname(path)\n if map: mk_dir(map, verbose) # Make map(s)\n with open(path, encoding=encoding, mode=prefix) as f:\n f.write(content)\n except Exception as e:\n cnsl.log(f'Error writing file\\n{e}', cfg.error)\n else:\n cnsl.log(f'[{ymd.now()}] Write file success', verbose)\n ok = True\n return ok\n\ndef save(path='dummy.txt', content=cfg.e, encoding='utf-8', prefix='w', verbose=cfg.verbose):\n '''Function writes content to a file'''\n return write(path, content, encoding, prefix, verbose)\n\ndef read(path, encoding='utf-8', verbose=cfg.verbose):\n '''Function reads the content in a file'''\n ok, t, paths = False, cfg.e, convert.to_lst(path)\n cnsl.log(f'[{ymd.now()}] Read a file', verbose)\n cnsl.log(f'File(s) {str(paths)}', verbose)\n\n for path in paths: # All paths\n with threading.Lock():\n if check(path, verbose):\n try:\n with open(path, encoding=encoding, mode='r') as f:\n t = f.read()\n except Exception as e:\n cnsl.log(f'Error reading a file\\n{e}', cfg.error)\n else:\n cnsl.log(f'[{ymd.now()}] Read file success', verbose)\n ok = True\n return ok, t\n\ndef readlines(path, encoding='utf-8',verbose=cfg.verbose):\n '''Function reads the content from a file into a list'''\n l, ok, t, paths = [], False, cfg.e, convert.to_lst(path)\n cnsl.log(f'[{ymd.now()}] Read file(s) into a list', verbose)\n cnsl.log(f'File(s) {str(paths)}', verbose)\n\n for path in paths: # All paths\n with threading.Lock():\n if check(path, verbose):\n try:\n with open(path, encoding=encoding, mode='r') as f:\n l = f.readlines()\n except Exception as e:\n cnsl.log(f'Error reading a file\\n{e}', cfg.error)\n else:\n cnsl.log(f'[{ymd.now()}] Read file success', verbose)\n ok = True\n return ok, l\n\ndef delete(path, verbose=cfg.verbose):\n '''Function deletes a file if exists'''\n ok, paths = False, convert.to_lst(path)\n cnsl.log(f'[{ymd.now()}] Delete file(s)', verbose)\n cnsl.log(f'File(s) {str(paths)}', verbose)\n\n for path in paths: # All paths\n with threading.Lock():\n if check(path, verbose):\n try:\n os.remove(path) # Remove file\n except Exception as e:\n cnsl.log(f'Error deleting a file\\n{e}', cfg.error)\n else:\n cnsl.log(f'[{ymd.now()}] 
Delete file success', verbose)\n                ok = True\n            else:\n                cnsl.log(f'Cannot delete. File does not exist', verbose)\n    return ok\n\ndef rm(path, verbose=cfg.verbose):\n    '''Function removes a file if exists (canonical) for delete()'''\n    return delete(path, verbose)\n\ndef mk_dir(dir, verbose=cfg.verbose):\n    '''Function makes a map if not already exists'''\n    ok, paths = False, convert.to_lst(dir)\n    cnsl.log(f'[{ymd.now()}] Make directory(s)', verbose)\n    cnsl.log(f'File(s) {str(paths)}', verbose)\n\n    for path in paths: # All paths\n        with threading.Lock():\n            try:\n                if os.path.isdir(path):\n                    cnsl.log('Map not made because it already exists.', verbose)\n                    ok = True\n                else:\n                    os.makedirs(path)\n            except Exception as e:\n                cnsl.log(f'Error make directory\\n{e}', cfg.error)\n            else:\n                cnsl.log(f'[{ymd.now()}] Make directory {path} successful', verbose)\n                ok = True\n\n    return ok\n\ndef rm_dir(\n        dir, # Map to remove\n        verbose=cfg.verbose\n    ):\n    '''Function deletes a directory, empty or not'''\n    ok, paths = False, convert.to_lst(dir)\n    cnsl.log(f'[{ymd.now()}] Remove directory(s)', verbose)\n    cnsl.log(f'Dir(s) {str(paths)}', verbose)\n\n    for path in paths: # All paths\n        with threading.Lock():\n            if os.path.exists(path):\n                try:\n                    shutil.rmtree(path)\n                except Exception as e:\n                    cnsl.log(f'Error removing map\\n{e}', cfg.error)\n                else:\n                    cnsl.log(f'[{ymd.now()}] Remove map {path} successful', verbose)\n                    ok = True\n            else:\n                cnsl.log(f'Cannot remove {path}.\\nMap does not exist.', verbose)\n    return ok\n\ndef is_dir_empthy(dir, verbose=cfg.verbose):\n    '''Function checks if a directory is empty'''\n    ok = False\n    cnsl.log(f'[{ymd.now()}] Check if dir is empty', verbose)\n    cnsl.log(f'Dir {dir}', verbose)\n\n    if os.path.exists(dir):\n        ok = True if len(os.listdir(dir)) == 0 else False\n    else:\n        cnsl.log('Map does not exist.', verbose)\n        ok = True\n\n    return ok\n\ndef unzip(zip, txt, verbose=cfg.verbose):\n    '''Function unzips a zipfile'''\n    ok = False\n    cnsl.log(f'[{ymd.now()}] Unzip a file', verbose)\n    cnsl.log(f'From {zip}\\nTo {txt}', verbose) # TODO force to txt file\n    with threading.Lock():\n        try:\n            dir_txt = os.path.dirname(txt)\n            with zipfile.ZipFile(zip, 'r') as z:\n                z.extractall(dir_txt)\n        except Exception as e:\n            cnsl.log(f'Error unzip\\n{e}', cfg.error)\n        else:\n            cnsl.log(f'[{ymd.now()}] Unzip success', verbose)\n            ok = True\n    return ok\n\ndef download(\n        url, # Url to download\n        path, # Path to download the file to\n        check = False, # Check file True will not overwrite the file if exists\n        verbose = cfg.verbose # Overwrite default value verbose -> see config.py\n    ):\n    '''Function downloads a file from an internet url'''\n    ok = False\n    cnsl.log(f'[{ymd.now()}] Download a file', verbose)\n    cnsl.log(f'From {url}\\nTo {path}', verbose)\n\n    # Check if image is already downloaded\n    if check and os.path.exists(path):\n        cnsl.log(f'Download skipped, file already exists', verbose)\n        ok = True\n    else:\n        with threading.Lock():\n            if url_exists(url, False): # Check if a url exists\n                try:\n                    mk_dir(os.path.dirname(path), False) # Make map if not exists\n                    urllib.request.urlretrieve( url, path ) # Download file\n                except Exception as e:\n                    cnsl.log(f'Error in download {e}', cfg.error)\n                else:\n                    cnsl.log(f'[{ymd.now()}] Download success', verbose)\n                    ok = True\n            else:\n                cnsl.log(f'Url {url} does not exist', True)\n\n    # Flood server protection\n    if cfg.download_flood_protection_active:\n        wait = cfg.download_interval_time\n        if wait < download_interval_time_min: \n            wait = download_interval_time_min\n        time.sleep(wait)\n\n    return ok\n\ndef download_read_file(url, file, 
verbose=cfg.verbose):\n    '''Function downloads a file, reads it and returns its content'''\n    ok, t = False, cfg.e\n    cnsl.log(f'[{ymd.now()}] download and read', verbose)\n    cnsl.log(f'Url: {url}', verbose)\n    cnsl.log(f'To file: {file}', verbose)\n    if check_for_internet_connection():\n        ok = download( url, file, verbose )\n        if ok: \n            ok, t = read(file)\n    else:\n        t = 'Cannot download file. There is no internet connection'\n        cnsl.log(t, cfg.error)\n    return ok, t\n\ndef request(url, type='txt', verbose=cfg.verbose):\n    '''Function makes the request based on the url given as parameter\n       The return values are: ok, True if success else False... And the text from\n       the request.'''\n    ok, t = False, cfg.e\n    cnsl.log(f'[{ymd.now()}] {type} - request', verbose)\n    cnsl.log(f'Url: {url}', verbose)\n    with threading.Lock():\n        try:\n            resp = urllib.request.urlopen( url )\n            data = resp.read()\n            if type == 'txt':\n                t = data\n            elif type == 'json':\n                t = json.loads(data)\n        except Exception as e:\n            cnsl.log(f'Error request\\n{e}', cfg.error)\n        else:\n            cnsl.log(f'[{ymd.now()}] Request success', verbose)\n            ok = True\n    return ok, t\n\ndef request_text(url, verbose=cfg.verbose):\n    '''Function makes an online request for a text file'''\n    return request(url, 'txt', verbose)\n\ndef request_json( url, verbose=cfg.verbose):\n    '''Function makes an online request for a json file'''\n    return request(url, 'json', verbose)\n\ndef has_internet(ip=cfg.check_ip_1, port=cfg.check_port_80, verbose=cfg.verbose):\n    '''Function checks if there is an internet connection available'''\n    cnsl.log(f'[{ymd.now()}] check internet connection', verbose)\n    cnsl.log(f'IP {ip}', verbose)\n\n    ok, wait = False, 0.1\n    with threading.Lock():\n        try:\n            address = (ip, port)\n            sock = socket.create_connection(address=address)\n        except Exception as e:\n            cnsl.log(f'Check failed\\n{e}', verbose)\n        else:\n            cnsl.log('Check success', verbose)\n            sock.close()\n            ok = True\n\n        time.sleep(wait)\n\n        # try:\n        #     requests.head(ip, timeout=cfg.check_timeout)\n        # except Exception as e:\n        #     cnsl.log(f'Check failed\\n{e}', True)\n        # else:\n        #     cnsl.log('Check success', verbose)\n        #     ok = True\n\n        # time.sleep(wait)\n\n        # try:\n        #     urllib.request.urlopen( ip ) #Python 3.x\n        # except Exception as e:\n        #     cnsl.log(f'Check failed\\n{e}', verbose)\n        # else:\n        #     cnsl.log('Check success', verbose)\n        #     ok = True\n\n    return ok\n\ndef check_for_internet_connection(verbose=cfg.verbose):\n    '''Function checks multiple IPs and ports for an internet connection'''\n    if has_internet( ip=cfg.check_ip_4,\n                     port=cfg.check_port_dns,\n                     verbose=verbose ):\n        return True\n    elif has_internet( ip=cfg.check_ip_1, \n                       port=cfg.check_port_80, \n                       verbose=verbose ):\n        return True\n    elif has_internet( ip=cfg.check_ip_2,\n                       port=cfg.check_port_80,\n                       verbose=verbose ):\n        return True\n    elif has_internet( ip=cfg.check_ip_3,\n                       port=cfg.check_port_dns,\n                       verbose=verbose ):\n        return True\n    return False\n\ndef url_exists(url, verbose=cfg.verbose):\n    '''Function checks if a url exists. 
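Uses an HTTP HEAD request. 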
Return True or False'''\n ok = False\n cnsl.log(f'[{ymd.now()}] check url existence', verbose)\n cnsl.log(f'Url {url}', verbose)\n with threading.Lock():\n try:\n resp = requests.head(url)\n ok = True if resp.status_code == 200 else False\n except Exception as e:\n cnsl.log(f'Error url exist\\n{e}', cfg.error)\n else:\n cnsl.log('Url exists', verbose)\n ok = True\n return ok\n\ndef rm_lst(lst = [], remove_empty=False, verbose=cfg.verbose):\n '''Function tries to remove all files in the list.'''\n cnsl.log(f'[{ymd.now()}] remove files in list', verbose)\n for path in list(set(lst)): # Make list unique and walkthrough paths\n if delete(path, verbose): # Remove file from disk\n dir = os.path.dirname(path) # Get map from path\n # Remove maps only empty maps and remove_empty is True\n while is_dir_empthy(dir, verbose) and remove_empty:\n rm_dir(dir, verbose) # Remove empty map\n dir = os.path.dirname(dir) # Go to upper dir\n\ndef lst_download(\n uries = [], # List with download urls\n paths = [], # List with names for the files from the download urls\n check = True, # Already download check. Do not overwrite download file\n verbose=cfg.verbose # Output to screen\n ):\n '''Downloads images based on two lists. It combines an uries list with a path list'''\n cnsl.log(f'[{ymd.now()}] download list', verbose)\n if not paths: # If no paths, make them based on the uries\n for url in uries:\n locnet = urlparse(url).netloc.split('.')[-2].lower()# Get name url\n name, ext = os.path.splitext(os.path.basename(url)) # Get name and extension\n ext = validate.extension(ext) # Handle dot.\n path = mk_path(cfg.dir_data, f'{locnet}_{name}{ext}') # Make image path\n paths.append(path) # Add to list\n\n res_urie, res_path, max = [], [], cfg.download_max_num\n # Combine/zip the two lists (with download paths and web urls) into one list\n for path, url in tuple(zip(paths[:max], uries[:max])): # Loop through img and url objects\n ok = download(url, path, check, verbose) # Download file\n if ok: # Success add to lists\n res_urie.append(url)\n res_path.append(path)\n\n return res_urie, res_path # Return correct paths\n\ndef lst_maps(map, recursive=True, secret=True, verbose=cfg.verbose):\n '''Returns a list with all the (sub) directories'''\n lst = []\n cnsl.log(f'[{ymd.now()}] check for directories', verbose)\n cnsl.log(f'In map {map}', verbose)\n for el in os.listdir(map):\n # skip secret maps\n if not secret: \n if el[0] == '.': \n continue \n\n path = mk_path(map, el)\n if os.path.isdir(path): # Check paths if is a map\n cnsl.log(f'Map found: {map}', verbose)\n lst.append(path)\n if recursive: # Add subdirectories too\n lst += lst_maps(path)\n\n return lst\n\ndef lst_files_dir(\n dir, # Dir to search for files\n extensions = cfg.e, # List of extensions or one string ext to search the map for\n keywords = cfg.e, # List of keyword or one string to search the directory for\n case_insensitive = True, # Search case insensitive. True by default.\n verbose = cfg.verbose # Overwrite verbose option\n ):\n '''Function list files in a directory. 
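Returns a list of matching file paths. 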
The list can be filtered by keywords\n       and extensions.'''\n    results = [] # List with found paths\n    paths = convert.to_lst(dir)\n    cnsl.log(f'[{ymd.now()}] list files directory(s)', verbose)\n    cnsl.log(f'Dir(s) {str(paths)}', verbose)\n\n    for path in paths: # All paths\n        if not os.path.exists(path): # Check map\n            cnsl.log(f'Path {path} does not exist', verbose)\n        else:\n            # Check types and add to an (empty) list element if needed\n            len_ext, len_key = 0, 0\n            if extensions:\n                extensions = convert.to_lst(extensions)\n                len_ext = len(extensions)\n            if keywords:\n                keywords = convert.to_lst(keywords)\n                len_key = len(keywords)\n            \n            filter_on = True if (len_key + len_ext) > 0 else False # Filter on ?\n            \n            if len_key > 0: \n                cnsl.log(f'Search words { str(keywords) }', verbose)\n            if len_ext > 0: \n                cnsl.log(f'Search extensions { str(extensions) }', verbose)\n\n            # Validate extensions, add point if needed\n            if len_ext > 0: \n                extensions = [validate.extension(ext) for ext in extensions]\n\n            # Get all the files in the directory\n            files = [f for f in os.listdir(path) if os.path.isfile( mk_path(path,f) )]\n\n            # Make search lists case in-sensitive if set\n            if case_insensitive:\n                extensions, keywords = [e.lower() for e in extensions], [k.lower() for k in keywords]\n\n            # Filter files based on extensions and keywords\n            for f in files:\n                # Get name and extension\n                fname, ext = os.path.splitext(f)\n\n                # Make case insensitive if needed\n                if case_insensitive: \n                    fname, ext = fname.lower(), ext.lower()\n\n                found = True\n                if filter_on: # Check to filter\n                    found_word, found_ext = False, False # Found is False by default\n                    # Check words and extensions\n                    if len_key > 0: # Check only if there are words to check\n                        # Check if keywords are in name\n                        if len([w for w in keywords if w in fname]) > 0:\n                            found_word = True # Part word is found\n                    else: # All words are True\n                        found_word = True\n\n                    if len_ext > 0: # Check only if there are extensions to check\n                        # Check if extension is found\n                        if len([e for e in extensions if e == ext]) > 0:\n                            found_ext = True # Extension is found\n                    else: # All extensions are True\n                        found_ext = True\n\n                    # Both must be True\n                    found = found_word and found_ext\n\n                if found: # If found add file path to results\n                    file_path = mk_path(path, f)\n                    cnsl.log(f'File found {file_path}', verbose)\n                    results.append(file_path)\n\n    return results\n\ndef remove_file_and_empthy_maps_reverse(path, verbose=cfg.verbose):\n    ok = delete(path, verbose=verbose) # Remove file\n    if ok:\n        while True: # Delete maps if empty\n            path = os.path.dirname(path) # Get map\n            if is_dir_empthy( path, verbose=verbose): # Remove only empty maps\n                rm_dir( path, verbose=verbose) # remove map\n            else:\n                break # Not empty, do not delete\n\ndef remove_files_in_list( lst = [], verbose=cfg.verbose):\n    '''Function tries to remove all downloaded images in the list.\n       Removes a directory if empty too.'''\n    cnsl.log('Start remove files in list', verbose)\n    lst = list(set(lst)) # Make list unique\n    for path in lst: # Walkthrough paths\n        remove_file_and_empthy_maps_reverse(path, verbose)\n\n    cnsl.log('End remove files from list\\n', verbose)\n","repo_name":"Mark-Zwaving/weatherstats-nl","sub_path":"sources/control/fio.py","file_name":"fio.py","file_ext":"py","file_size_in_byte":19971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"42062170114","text":"'''\r\nUnit tests for the `functions` package.\r\n\r\n:see: http://docs.python.org/lib/minimal-example.html for an intro to 
unittest\r\n:see: http://agiletesting.blogspot.com/2005/01/python-unit-testing-part-1-unittest.html\r\n:see: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/305292\r\n'''\r\nfrom __future__ import division\r\n__docformat__ = \"restructuredtext en\"\r\n\r\nfrom tests_config import econpy #tests_config.py modifies sys.path to find econpy\r\nimport unittest\r\nimport random\r\nimport numpy\r\nfrom econpy.functions import polyuv\r\n\r\n#BEGIN lst:function.simplest_horner\r\n#goal: evaluate polynomial at point x\r\n#input:\r\n#\tcoefficients : tuple (ordered as (a_0,...,a_N))\r\n#\tx : number (point of evaluation)\r\n#output:\r\n#\tresult : number (value of polynomial at x)\r\ndef simplest_horner(coefficients, x):\r\n\tresult = 0\r\n\tfor coef in reversed(coefficients):\r\n\t\tresult = coef + result * x\r\n\treturn result\r\n#END lst:function.simplest_horner\r\n\r\n#BEGIN lst:simplest_horner01\r\n#goal: evaluate polynomial p and derivative p' at point x\r\n#input:\r\n#\tcoefficients : tuple (ordered as (a_0,...,a_N))\r\n#\tx : number (point of evaluation)\r\n#output:\r\n#\tresult : number,number = p(x), p'(x)\r\ndef horner01(coefficients, x):\r\n\tp0 = 0\r\n\tp1 = 0\r\n\tfor coef in reversed(coefficients):\r\n\t\tp1 = p1*x + p0\r\n\t\tp0 = p0*x + coef\r\n\treturn p0, p1\r\n#END lst:simplest_horner01\r\n\r\nclass test_functions(unittest.TestCase):\r\n\tcoefficients = random.sample(range(1,20), 5)\r\n\tcoefs = [1.0, 0.2, 1.0, -0.4]\r\n\tdef test_polyderiv(self):\r\n\t\ta = self.coefficients\r\n\t\tb = [ (i+1)*a[i+1] for i in range(len(a)-1) ]\r\n\t\tself.assertEqual(b, polyuv.polyderiv(a))\r\n\t\tc = [ (i+1)*b[i+1] for i in range(len(b)-1) ]\r\n\t\tself.assertEqual(c, polyuv.polyderiv(b))\r\n\t\tself.assertEqual(c, polyuv.polyderiv(a,d=2))\r\n\tdef test_horner(self):\r\n\t\ta = self.coefficients\r\n\t\tb = [ (i+1)*a[i+1] for i in range(len(a)-1) ]\r\n\t\tc = [ (i+1)*b[i+1] for i in range(len(b)-1) ]\r\n\t\tx = random.random()\r\n\t\tref0 = simplest_horner(a,x)\r\n\t\tref1 = simplest_horner(b,x)\r\n\t\tref2 = simplest_horner(c,x)\r\n\t\tself.assertEqual( ref0, polyuv.horner(a,x) )\r\n\t\tself.assertEqual( ref0, polyuv.horner01(a,x)[0] )\r\n\t\tself.assertEqual( ref0, polyuv.horner012(a,x)[0] )\r\n\t\tself.assertEqual( ref1, polyuv.horner01(a,x)[1] )\r\n\t\tself.assertEqual( ref1, polyuv.hornerd(a,x,1) )\r\n\t\tself.assertEqual( ref1, polyuv.horner012(a,x)[1] )\r\n\t\tself.assertEqual( ref2, polyuv.horner012(a,x)[2] )\r\n\t\tself.assertEqual( ref2, polyuv.hornerd(a,x,2) )\r\n\tdef test_PolynomialUV(self):\r\n\t\tp1 = polyuv.PolynomialUV(self.coefs)\r\n\t\tx = -1.9\r\n\t\tself.assertEqual( p1(x), ((-0.4*x + 1.0)*x + 0.2)*x + 1.0 )\r\n\tdef test_horner_simplest(self):\r\n\t\tx = random.random()\r\n\t\tself.assertEqual( simplest_horner(self.coefs, x), polyuv.horner(self.coefs, x) )\r\n\tdef test_deriv(self):\r\n\t\tcoefs = list(range(6))\r\n\t\trandom.shuffle(coefs)\r\n\t\tp = numpy.poly1d(list(reversed(coefs)))\r\n\t\tfor n in range(len(coefs)):\r\n\t\t\tself.assertEqual(polyuv.polyderiv(coefs, n), list(p.deriv(m=n).c)[::-1] ) \r\n\r\n'''\r\nzeros = p1.zeros()\r\nfor z in zeros:\r\n\tprint z, p1(z)\r\np2 = Polynomial([[1.,0.3],[-0.2,0.5]])\r\ny = 0.3\r\nprint p2(x,y), 1. 
+ 0.3*y - 0.2*x + 0.5*x*y\r\nfit = fitPolynomial(2, [1.,2.,3.,4.], [2.,4.,8.,14.])\r\nprint fit.coeff\r\n\r\np = Polynomial([1., 1.])\r\ninvp = 1./p\r\npr = RationalFunction(p)\r\nprint pr+invp\r\n'''\r\n\r\n\r\n\r\nif __name__==\"__main__\":\r\n\tunittest.main()\r\n\r\n","repo_name":"vernadici/econpy","sub_path":"tests/test_functions.py","file_name":"test_functions.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"9616869466","text":"import click\nimport csv\nimport flask\nimport psycopg2\nimport psycopg2.extras\nimport dateutil.parser\nfrom datetime import datetime, timedelta, timezone\nfrom tzlocal import get_localzone\nfrom werkzeug.local import LocalProxy\nfrom flask import g, request, jsonify, render_template\n\nimport json_encoder\n\n\napp = flask.Flask(__name__)\napp.json_encoder = json_encoder.JSONEncoder\n\n\ndef connect_db():\n return psycopg2.connect(\n \"dbname=mahana\",\n cursor_factory=psycopg2.extras.NamedTupleCursor)\n\ndef get_db():\n if not hasattr(g, 'database'):\n g.database = connect_db()\n return g.database\n\n\n@app.teardown_appcontext\ndef close_db(error):\n db_connection = getattr(g, 'database', None)\n if db_connection is None:\n return\n\n if error is None:\n db_connection.commit()\n else:\n db_connection.rollback()\n db_connection.close()\n\n\ndb = LocalProxy(get_db)\n\n\ndef save_datapoints(sensor_name, data_points):\n cursor = db.cursor()\n for dp in data_points:\n cursor.execute(\"\"\"\n INSERT INTO temperature_samples (sensor_name, sample_time, temperature)\n VALUES (%s, %s, %s);\"\"\", \n [sensor_name, dp[0], dp[1]]);\n\n\ndef get_datapoints(sensor_name, start_timestamp=None):\n cursor = db.cursor()\n cursor.execute(\"\"\"\n SELECT sample_time, temperature\n FROM\n (SELECT \n sample_time, temperature,\n row_number() OVER (ORDER BY id ASC) as row_number\n FROM\n temperature_samples\n WHERE\n sensor_name = %(sensor_name)s\n ORDER BY\n sample_time ASC) as t\n WHERE \n t.row_number %% %(take_every)s = 0\n AND (%(start_timestamp)s IS NULL OR t.sample_time > %(start_timestamp)s)\n \"\"\", {\n \"sensor_name\": sensor_name,\n \"start_timestamp\": start_timestamp,\n \"take_every\": 10\n })\n\n for row in cursor:\n yield (row.sample_time, row.temperature)\n\n@app.route(\"/graph/\")\ndef graph_sensor(sensor_name):\n return render_template(\n \"graph.html\",\n sensor_name=sensor_name,\n days_to_fetch=request.args.get(\"days\", 7, int))\n\n@app.route(\"/api/\", methods=[\"GET\", \"POST\"])\ndef api_sensor(sensor_name):\n if request.method == \"GET\":\n days_to_fetch = request.args.get(\"days\", 7, int)\n start_timestamp = datetime.now() - timedelta(days=days_to_fetch)\n return jsonify(list(get_datapoints(sensor_name, start_timestamp=start_timestamp)))\n else:\n json = request.get_json()\n save_datapoints(sensor_name, ((dateutil.parser.parse(ts).replace(tzinfo=timezone.utc), temperature) for ts, temperature in json))\n return \"OK\", 200\n\n@click.group()\ndef cli():\n pass\n\n\n@cli.command(\"csv\")\n@click.argument('sensor', type=str)\n@click.argument('csvfile', type=click.File('w'))\ndef dump_csv(sensor, csvfile):\n local_tz = get_localzone()\n with app.app_context():\n data = get_datapoints(sensor)\n with csvfile:\n csvwriter = csv.writer(csvfile)\n csvwriter.writerow(['Time', 'Temperature'])\n for ts, temp in data:\n csvwriter.writerow([ts.astimezone(local_tz).strftime(\"%Y-%m-%d %H:%M:%S\"), temp])\n \n\n@cli.command()\n@click.option(\"--port\", default=8900, 
help=\"The port to use\")\ndef run(port=8900):\n app.run(host=\"0.0.0.0\", port=port)\n\nif __name__ == \"__main__\":\n\tcli()\n","repo_name":"marcellarius/mahana","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"25874796443","text":"import sys\nimport rclpy\nfrom rclpy.node import Node\nfrom sensor_msgs.msg import LaserScan\n\ndef callback_scan(msg):\n ## extract ranges from message\n scan=list(msg.ranges)\n print(\" Scan min: %f front: %f\" % ( min(scan),scan[362]))\n print ()\n\n\ndef main(args=None):\n if len(sys.argv) < 2:\n sys.stderr.write('Usage: sys.argv[0] \\n')\n sys.exit(1)\n rclpy.init()\n nr=sys.argv[1]\n\n node = Node('listener')\n\n # Subscribe topics and bind with callback functions\n node.create_subscription(LaserScan, f\"/pioneer{nr}/scan\", callback_scan, 10)\n\n # spin(node) simply keeps python from exiting until this node is stopped\n rclpy.spin(node)\n\n node.destroy_node()\n rclpy.shutdown()\n\nif __name__ == '__main__':\n main()\n","repo_name":"JakubBacik/MobileRobotics","sub_path":"lab1/subscriber_scan.py","file_name":"subscriber_scan.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"14863222202","text":"\nfrom src.data_manager import DataManager\nfrom numpy import *\nfrom src.norm_type import NormType\nfrom src.normalizer import Normalizer\nfrom src.log_res import LogRes\n\nurl = '../../resources/german_data.txt'\nsplit_sign = \" \"\ndata = DataManager.load_data(url, False, False, split_sign)\n\ncategorical_mask = [True, False, True, True, False, True, True, False, True, True, False, True, False, True, True, False, True, False, True, True]\n\ninputs = array([x[:19] for x in data])\ntarget = array([y[20] for y in data])\ntarget = array([0 if y == '2' else 1 for y in target])\n# decode labeled data to numerical values\ninputs = DataManager.categorize_data(inputs, categorical_mask)\n\n# we can not normalize data that was hot encoded to numerical values\n# here we assume that all data is already in numerical type\ninputs = Normalizer.normalize(inputs.astype(float), NormType.stand_norm, [0, 1, 2, 3, 4, 5, 6])\n\nX_train, X_test, y_train, y_test = DataManager.train_test_split(inputs, target, test_size=0.4, random_state=0)\n\n\nlog_res = LogRes(X_train, y_train, X_test, y_test)\nerror = 0.001\nresults = log_res.grad_desc_fit(error)\nlog_res.fit_test()\nprint(results)\n\n\n\"\"\"\nlogreg = linear_model.LogisticRegression(tol=1e-10)\nlogreg.fit(X_train,y_train)\nprint(logreg.coef_)\n\"\"\"","repo_name":"maciejbihun9/inteligentna_siec","sub_path":"src/run_scripts/run_german_data_log_res.py","file_name":"run_german_data_log_res.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"10567195500","text":"import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n#fill directories as text\nbasefolder = 'E:/OneDrive/0_IST_GroundwatCh/IRBM/Assignment_2/Results_NGS'\nfolders = {'Run4' : 'Reference Water Quality', 'Run5' : 'Water quality after fertilization '}\nheader = 7 #header is the key and extension the value\nextension = '.srn'\nfigs_folder = 'E:/OneDrive/0_IST_GroundwatCh/IRBM/Assignment_2/Figures - Report'\n\nfiles = os.listdir()\n\ndatabase = 
pd.DataFrame()\n\nos.chdir(basefolder)\nfor folder, scenario in folders.items():\n    os.chdir(folder) \n    files = [ file for file in os.listdir() if file.endswith(extension)] #select extensions \n    #iterate through the files within a certain extension to open it and append to dataframe\n    for file in files:\n        df = pd.read_csv(file, header = header, delim_whitespace=True)\n        df = df.loc[~np.isnan(df.YY)]\n        df['scenario'] = scenario\n        df['extension'] = extension\n        ID = file.split('.')[0]\n        df['id'] = ID\n        database = database.append(df)\n    os.chdir('../')\ndates = database.YY.astype('str').str[0:4] + '-' + database.MM.astype('str').str[:-2] + '-01'\ndatabase['dates'] = dates\ndatabase['dates'] = pd.to_datetime(database.dates, format = '%Y-%m-%d')\n# database = database.iloc[:,7:]\nprint('database')\ndatabase = database.loc[database.MM < 7]\ndatabase.to_csv('E:\\\\OneDrive\\\\0_IST_GroundwatCh\\\\IRBM\\\\Assignment_2\\\\Results_NGS\\\\Run4-5_{}.csv'.format(extension[1:]))\n","repo_name":"SauloVSFh/Hydro-geo-logy","sub_path":"Mohid_PostProc/wq_generatingdatabase.py","file_name":"wq_generatingdatabase.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"73267944470","text":"import math\nimport paddle\nfrom net import MatchPyramidLayer\n\n\nclass StaticModel():\n    def __init__(self, config):\n        self.cost = None\n        self.config = config\n        self._init_hyper_parameters()\n\n    def _init_hyper_parameters(self):\n        self.emb_path = self.config.get(\"hyper_parameters.emb_path\")\n        self.sentence_left_size = self.config.get(\n            \"hyper_parameters.sentence_left_size\")\n        self.sentence_right_size = self.config.get(\n            \"hyper_parameters.sentence_right_size\")\n        self.vocab_size = self.config.get(\"hyper_parameters.vocab_size\")\n        self.emb_size = self.config.get(\"hyper_parameters.emb_size\")\n        self.kernel_num = self.config.get(\"hyper_parameters.kernel_num\")\n        self.hidden_size = self.config.get(\"hyper_parameters.hidden_size\")\n        self.hidden_act = self.config.get(\"hyper_parameters.hidden_act\")\n        self.out_size = self.config.get(\"hyper_parameters.out_size\")\n        self.channels = self.config.get(\"hyper_parameters.channels\")\n        self.conv_filter = self.config.get(\"hyper_parameters.conv_filter\")\n        self.conv_act = self.config.get(\"hyper_parameters.conv_act\")\n        self.pool_size = self.config.get(\"hyper_parameters.pool_size\")\n        self.pool_stride = self.config.get(\"hyper_parameters.pool_stride\")\n        self.pool_type = self.config.get(\"hyper_parameters.pool_type\")\n        self.pool_padding = self.config.get(\"hyper_parameters.pool_padding\")\n        self.learning_rate = self.config.get(\n            \"hyper_parameters.optimizer.learning_rate\")\n\n    def create_feeds(self, is_infer=False):\n        sentence_left = paddle.static.data(\n            name=\"sentence_left\",\n            shape=[-1, self.sentence_left_size],\n            dtype='int64')\n        sentence_right = paddle.static.data(\n            name=\"sentence_right\",\n            shape=[-1, self.sentence_right_size],\n            dtype='int64')\n        feeds_list = [sentence_left, sentence_right]\n        return feeds_list\n\n    def net(self, input, is_infer=False):\n        pyramid_model = MatchPyramidLayer(\n            self.emb_path, self.vocab_size, self.emb_size, self.kernel_num,\n            self.conv_filter, self.conv_act, self.hidden_size, self.out_size,\n            self.pool_size, self.pool_stride, self.pool_padding,\n            self.pool_type, self.hidden_act)\n        prediction = pyramid_model.forward(input)\n\n        if is_infer:\n            fetch_dict = {'prediction': prediction}\n            return fetch_dict\n\n        # calculate hinge loss\n        pos = paddle.slice(\n
prediction, axes=[0, 1], starts=[0, 0], ends=[64, 1])\n neg = paddle.slice(\n prediction, axes=[0, 1], starts=[64, 0], ends=[128, 1])\n loss_part1 = paddle.subtract(\n paddle.full(\n shape=[64, 1], fill_value=1.0, dtype='float32'), pos)\n loss_part2 = paddle.add(loss_part1, neg)\n loss_part3 = paddle.maximum(\n paddle.full(\n shape=[64, 1], fill_value=0.0, dtype='float32'),\n loss_part2)\n avg_cost = paddle.mean(loss_part3)\n\n self.inference_target_var = avg_cost\n self._cost = avg_cost\n\n fetch_dict = {'cost': avg_cost}\n return fetch_dict\n\n def create_optimizer(self, strategy=None):\n optimizer = paddle.optimizer.Adam(\n learning_rate=self.learning_rate, lazy_mode=True)\n if strategy != None:\n import paddle.distributed.fleet as fleet\n optimizer = fleet.distributed_optimizer(optimizer, strategy)\n optimizer.minimize(self._cost)\n\n def infer_net(self, input):\n return self.net(input, is_infer=True)\n","repo_name":"PaddlePaddle/PaddleRec","sub_path":"models/match/match-pyramid/static_model.py","file_name":"static_model.py","file_ext":"py","file_size_in_byte":3668,"program_lang":"python","lang":"en","doc_type":"code","stars":3956,"dataset":"github-code","pt":"49"} +{"seq_id":"8053288141","text":"#!/usr/bin/env python\nimport multiprocessing\nimport os\nimport time\nfrom multiprocessing import Pool\n\nimport numpy as np\nfrom scipy import sparse\n\nimport util\n\n__author__ = \"Razin Shaikh and Minjie Lyu\"\n__credits__ = [\"Razin Shaikh\", \"Minjie Lyu\", \"Vladimir Brusic\"]\n__version__ = \"1.0\"\n__status__ = \"Prototype\"\n\ndef get_sparse(filename, start_index, end_index):\n '''Generates the sparse matrix from the CSV file in the range of line start_index to line end_index-1 \n \n Arguments:\n filename {string} -- Path of CSV file\n start_index {int} -- The starting line number from CSV file for sparse matrix\n end_index {int} -- The (last line number + 1) from CSV file for sparse matrix\n \n Returns:\n scipy.sparse.csr_matrix -- The compressed sparse matrix generated from data read from CSV file\n '''\n\n test = []\n period = 1000\n with open(filename, encoding = 'UTF-8') as content:\n for i, line in enumerate(content):\n if(i >= start_index and i < end_index):\n line = line.replace('\\n', '').split(',')\n temp = sparse.csr_matrix(line[1:], dtype=float)\n temp.eliminate_zeros()\n test.append(temp)\n if(i % period == 1):\n test = [sparse.vstack(test)]\n elif(i >= end_index):\n break\n test = sparse.vstack(test)\n\n return test\n\n\ndef compress_file(fn, save=True):\n '''Compresses a CSV file to scipy.sparse matrix\n \n Arguments:\n fn {string} -- Path of CSV file\n \n Keyword Arguments:\n save {bool} -- Determines whether the file should be saved to disk (.npz file) (default: {True})\n \n Returns:\n [string, scipy.sparse.csr_matrix] -- Return the path of saved file if save=True, \n else returns the compressed scipy.sparse csr matrix.\n ''' \n \n filename = fn\n num_lines = sum(1 for line in open(filename, encoding='UTF-8'))\n dvide = multiprocessing.cpu_count()\n length = num_lines // dvide\n index = []\n for i in range(dvide):\n index.append(length * i)\n index.append(num_lines)\n index[0]=1\n\n p = Pool(dvide)\n res_l =[]\n for i in range(dvide):\n res = p.apply_async(get_sparse, args=(filename, index[i], index[i+1]))\n res_l.append(res)\n p.close()\n p.join()\n\n x = []\n for i in range(dvide):\n x.append(res_l[i].get())\n\n data = sparse.vstack(x)\n del res_l\n del x\n if save: \n sparse.save_npz(filename.split('.')[0] + '.npz', data.tocsc())\n return filename.split('.')[0] + 
'.npz'\n else:\n return data.tocsc()\n","repo_name":"oftensmile/SingleCellAnalysis","sub_path":"compress.py","file_name":"compress.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"17107040030","text":"from app.repositories.base_repo import BaseRepo\nfrom app.models.client import Client\n\n\nclass ClientRepo(BaseRepo):\n def __init__(self):\n BaseRepo.__init__(self, Client)\n\n def new_client(\n self,\n institution_name,\n institution_url,\n institution_city,\n institution_country,\n institution_size,\n status,\n start_date,\n is_deleted=False,\n ):\n client = Client(\n institution_name=institution_name,\n institution_url=institution_url,\n institution_city=institution_city,\n institution_country=institution_country,\n institution_size=institution_size,\n status=status,\n start_date=start_date,\n is_deleted=is_deleted,\n )\n\n client.save()\n return client\n","repo_name":"Maxcutex/pm_api","sub_path":"app/repositories/client_repo.py","file_name":"client_repo.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"14556903823","text":"#write a programe to create GUI interface\n\nfrom tkinter import *\n\nroot = Tk()\n\nroot.title(\"My Project\")\n\nroot.geometry(\"500x500\")\n\nname = Label(root,text=\"Enter your name: \")\nname.place(x=10,y=10)\n\ne1=Entry(root)\ne1.place(x=110,y=12.5)\n\nbtn=Button(root,text=\"Submit\",font=(\"Ariel 10\"),foreground=\"white\", background=\"black\")\nbtn.place(x=75,y=50)\n","repo_name":"AnshuPtl/Python","sub_path":"Task/Tkinter.py","file_name":"Tkinter.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"73268513110","text":"#!/bin/env python\n# -*- coding: utf-8 -*-\n# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python\n\"\"\"\ntest_is_complex\n\"\"\"\nimport paddle\nimport pytest\n\n\n@pytest.mark.api_base_is_complex_vartype\ndef test_is_complex0():\n \"\"\"\n int\n \"\"\"\n x = paddle.to_tensor([[1, 3], [3, 2], [5, 6]])\n assert not paddle.is_complex(x)\n assert not paddle.Tensor.is_complex(x)\n\n\n@pytest.mark.api_base_is_complex_parameters\ndef test_is_complex1():\n \"\"\"\n float\n \"\"\"\n x = paddle.rand((4,))\n assert not paddle.is_complex(x)\n assert not paddle.Tensor.is_complex(x)\n\n\n@pytest.mark.api_base_is_complex_parameters\ndef test_is_complex2():\n \"\"\"\n complex\n \"\"\"\n types = [\"complex64\", \"complex128\"]\n z = paddle.rand((3,)) + 1j * paddle.rand((3,))\n for dtype in types:\n z = z.astype(dtype)\n assert paddle.is_complex(z)\n assert paddle.Tensor.is_complex(z)\n","repo_name":"PaddlePaddle/PaddleTest","sub_path":"framework/api/paddlebase/test_is_complex.py","file_name":"test_is_complex.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"49"} +{"seq_id":"19781241664","text":"import time\nimport unittest\n\nfrom srbai.SintaktickiOperatori.spellcheck import SpellCheck\n\n\nclass SpellCheckTests(unittest.TestCase):\n def test_spellcheck(self):\n\n sc = SpellCheck('sr-latin')\n print('done reading dic and initializing')\n word = \"predetori\"\n start_time = time.time()\n correction = sc.spellcheck(word)\n if correction:\n print(f\"Did you mean '{correction}'?\")\n else:\n print(\"No close match found.\")\n end_time = time.time()\n duration = end_time 
- start_time\n print(\"duration was:\" + str(duration))\n\n word = \"rdnici\"\n start_time = time.time()\n correction = sc.spellcheck(word)\n if correction:\n print(f\"Did you mean '{correction}'?\")\n else:\n print(\"No close match found.\")\n end_time = time.time()\n duration = end_time - start_time\n print(\"duration was:\" + str(duration))\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"Serbian-AI-Society/SrbAI","sub_path":"tests/SpellCheck_test.py","file_name":"SpellCheck_test.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"49"} +{"seq_id":"32234327741","text":"import datetime\nimport logging\nimport warnings\nfrom contextlib import nullcontext\nfrom typing import Optional, Set, List\n\nimport requests\nfrom django.db.models import OuterRef, Q, Subquery, F\nfrom slugify import slugify\nfrom urllib3.exceptions import InsecureRequestWarning\n\nfrom importer import JSON\nfrom importer.models import CachedObject, ExternalList\nfrom mainapp.functions.search import search_bulk_index\nfrom mainapp.models import (\n LegislativeTerm,\n Location,\n Body,\n File,\n Person,\n Organization,\n Membership,\n Meeting,\n Paper,\n Consultation,\n AgendaItem,\n)\nfrom mainapp.models.file import fallback_date\nfrom meine_stadt_transparent import settings\n\nlogger = logging.getLogger(__name__)\n\nimport_order = [\n LegislativeTerm,\n Location,\n Body,\n File,\n Person,\n Organization,\n Membership,\n Meeting,\n Paper,\n Consultation,\n AgendaItem,\n] # type: List[Type[DefaultFields]]\n\n\ndef requests_get(url, params=None, retries: int = 3, **kwargs) -> requests.Response:\n \"\"\"Makes a request with the custom user agent and retry on connection error\"\"\"\n user_agent = \"{} ({})\".format(\n slugify(settings.PRODUCT_NAME), settings.TEMPLATE_META[\"github\"]\n )\n kwargs.setdefault(\"headers\", {})\n kwargs[\"headers\"][\"User-Agent\"] = user_agent\n # Hack to make Landshut work with the RIS' broken SSL setup\n if settings.SSL_NO_VERIFY:\n kwargs[\"verify\"] = False\n with warnings.catch_warnings() if settings.SSL_NO_VERIFY else nullcontext():\n if settings.SSL_NO_VERIFY:\n warnings.filterwarnings(\"ignore\", category=InsecureRequestWarning)\n while True:\n try:\n response = requests.get(url, params, **kwargs)\n response.raise_for_status()\n return response\n except requests.exceptions.ConnectionError as e:\n retries -= 1\n if retries == 0:\n raise\n logger.error(f\"Error {e} in request for {url}, retrying\")\n\n\ndef externalize(\n libobject: JSON, key_callback: Optional[Set[str]] = None\n) -> List[CachedObject]:\n \"\"\"Converts an oparl object with embedded objects to multiple flat json objects\"\"\"\n\n externalized = []\n\n # sorted copies, thereby avoiding modification while iterating\n for key in sorted(libobject.keys()):\n # Skip the geojson object\n if key == \"geojson\":\n continue\n\n entry = libobject[key]\n\n if isinstance(entry, dict):\n if \"id\" not in entry:\n logger.warning(\n f\"Embedded object '{key}' in {libobject['id']} does not have an id,\"\n f\" skipping: {entry}\"\n )\n del libobject[key]\n continue\n\n if isinstance(key_callback, set):\n key_callback.add(key)\n entry[\"mst:backref\"] = libobject[\"id\"]\n\n externalized += externalize(entry)\n libobject[key] = entry[\"id\"]\n\n if isinstance(entry, list) and len(entry) > 0 and isinstance(entry[0], dict):\n if isinstance(key_callback, set):\n key_callback.add(key)\n for pos, entry in enumerate(entry):\n if \"id\" not in 
entry:\n                    logger.warning(\n                        f\"Embedded object '{key}' in {libobject['id']} does not have an\"\n                        f\" id, skipping: {entry}\"\n                    )\n                    del libobject[key]\n                    break\n\n                entry[\"mst:backref\"] = libobject[\"id\"]\n                entry[\"mst:backrefPosition\"] = pos  # We need this for agenda items\n\n                externalized += externalize(entry)\n                libobject[key][pos] = entry[\"id\"]\n\n    externalized.append(\n        CachedObject(\n            url=libobject[\"id\"],\n            data=libobject,\n            oparl_type=libobject[\"type\"].split(\"/\")[-1],\n        )\n    )\n\n    return externalized\n\n\ndef clear_import(prefix: str, include_cache: bool = True) -> None:\n    \"\"\"Clear all data from the oparl api identified by the prefix\"\"\"\n    for class_object in import_order:\n        name = class_object.__name__\n        stats = class_object.objects.filter(oparl_id__startswith=prefix).delete()\n        logger.info(f\"{name}: {stats}\")\n    if include_cache:\n        deleted = CachedObject.objects.filter(url__startswith=prefix).delete()\n        logger.info(f\"{deleted} cached objects deleted\")\n        deleted = ExternalList.objects.filter(url__startswith=prefix).delete()\n        logger.info(f\"{deleted} external lists deleted\")\n\n\ndef import_update(\n    body_id: Optional[str] = None,\n    ignore_modified: bool = False,\n    download_files: bool = True,\n) -> None:\n    from importer.importer import Importer\n    from importer.loader import get_loader_from_body\n\n    if body_id:\n        bodies = Body.objects.filter(oparl_id=body_id).all()\n    else:\n        bodies = Body.objects.filter(oparl_id__isnull=False).all()\n    for body in bodies:\n        logger.info(f\"Updating body {body}: {body.oparl_id}\")\n        loader = get_loader_from_body(body.oparl_id)\n        importer = Importer(loader, body, ignore_modified=ignore_modified)\n        importer.update(body.oparl_id)\n        importer.force_singlethread = True\n        if download_files:\n            importer.load_files(\n                fallback_city=settings.GEOEXTRACT_SEARCH_CITY or body.short_name,\n                update=True,\n            )\n\n\ndef fix_sort_date(import_date: datetime.datetime):\n    \"\"\"\n    Tries to guess the correct sort date for all papers and files that were created no later\n    than import_date by looking at\n    a) the legal date,\n    b) the date of the earliest consultation or\n    c) falling back to fallback_date\n    \"\"\"\n    logger.info(\"Fixing the sort date of the papers\")\n    # Use the date of the earliest consultation\n    earliest_consultation = (\n        Consultation.objects.filter(paper=OuterRef(\"pk\"), meeting__isnull=False)\n        .order_by(\"meeting__start\")\n        .values(\"meeting__start\")[:1]\n    )\n    papers_with_consultation = (\n        Paper.objects.filter(Q(sort_date=fallback_date) | ~Q(sort_date=F(\"legal_date\")))\n        .annotate(earliest_consultation=Subquery(earliest_consultation))\n        .filter(earliest_consultation__isnull=False)\n        # We filter on these to only update those necessary in elasticsearch\n        .filter(\n            ~Q(sort_date=F(\"earliest_consultation\"))\n            & ~Q(display_date=F(\"earliest_consultation\"))\n        )\n    )\n    num = papers_with_consultation.update(\n        sort_date=F(\"earliest_consultation\"), display_date=F(\"earliest_consultation\")\n    )\n    if settings.ELASTICSEARCH_ENABLED:\n        search_bulk_index(Paper, papers_with_consultation)\n    logger.info(f\"{num} sort dates were fixed by the earliest consultation\")\n\n    logger.info(\"Fixing the sort date of the files\")\n    num = File.objects.filter(\n        created__lte=import_date, legal_date__isnull=False\n    ).update(sort_date=F(\"legal_date\"), modified=F(\"legal_date\"))\n    logger.info(f\"{num} files were changed\")\n\n    earliest_paper = (\n        Paper.objects.filter(files__pk=OuterRef(\"pk\"))\n        .order_by(\"sort_date\")\n        .values(\"sort_date\")[:1]\n    )\n    
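# Files without a legal date fall back to the sort date of their earliest paper\n    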
file_with_paper = (\n        File.objects.filter(legal_date__isnull=True)\n        .annotate(earliest_paper=Subquery(earliest_paper))\n        .filter(earliest_paper__isnull=False)\n        # We filter on these to only update those necessary in elasticsearch\n        .filter(~Q(sort_date=F(\"earliest_paper\")))\n    )\n    num = file_with_paper.update(sort_date=F(\"earliest_paper\"))\n    if settings.ELASTICSEARCH_ENABLED:\n        search_bulk_index(File, file_with_paper)\n    logger.info(f\"{num} files updated\")\n","repo_name":"meine-stadt-transparent/meine-stadt-transparent","sub_path":"importer/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":7775,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"49"} +{"seq_id":"37193934395","text":"# Problem Link\n# https://www.hackerrank.com/challenges/crush/problem?h_l=interview&playlist_slugs%5B%5D%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D%5B%5D=arrays\n\nn, m = map(int, input().split())\nq = [0] * (n + 2)\n\nfor _ in range(m):\n    a, b, k = map(int, input().split())\n    q[a] += k\n    q[b + 1] -= k\n    \n# print(q)\nans = cursum = 0\nfor x in q:\n    cursum += x\n    ans = max(cursum, ans)\n\nprint(ans)\n","repo_name":"sounishnath003/practice-180D-strategy","sub_path":"hackerrank-interview-preparation-kit/arrays/array-manipulation.py","file_name":"array-manipulation.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"49"} +{"seq_id":"2831938955","text":"\"\"\"\nCreated on Sat Sep 21 23:44:35 2013\n\"\"\"\n\n# Pseudo-Random Number Generator\n\"\"\"\nThe following random number methods are implemented below.\n\n-------- Uniform Distribution --------\n1. Lehmer modulo generator\n\n-------- Gaussian Distribution --------\n2. Polar method generator\n3. 
Box-Muller generator\n\"\"\"\n\nimport numpy as np\nfrom math import sqrt,log,cos,pi\n\n\n# 1\n# Lehmer modulo generator\n#*************************************************************\n# NOTE: this generates a random uniform distribution over [0,1]\n#-----------------------------------------------------------\n# seed = initial seed to start the random sequence\n# n = how many random numbers to generate in array\n#-----------------------------------------------------------\ndef randUniform(seed,n):\n # define constants to use in algorithm\n a = 16807.0\n xm = 2147483647.0\n x0 = 2147483711.0\n\n # array to store random uniform numbers\n rand = []\n\n # loop through modulo routine to generate numbers\n for i in range(n):\n seed = (a*seed) % xm\n rand.append(seed/x0)\n\n return np.array(rand)\n#*************************************************************\n\n\n# 2\n# Polar method generator\n#*************************************************************\n# NOTE: this generates a random normal distribution\n# on the real line from a random uniform generator\n#-----------------------------------------------------------\n# mean = mean (center of distribution)\n# std = standard deviation (spread of distribution)\n# n = how many random numbers to generate in array\n#-----------------------------------------------------------\ndef randNormal1(mean,std,n):\n # array to store random normal numbers\n rand = []\n\n # loop to generate random uniform numbers\n # and turn into random normal numbers\n count = 0\n while count < n:\n\n # get two random uniform numbers to start polar method\n # NOTE: uses built-in numpy.random.uniform\n u1 = np.random.uniform(0,1,1)[0]\n u2 = np.random.uniform(0,1,1)[0]\n\n # define two new values to start polar method\n v1 = 2.0*u1 - 1.0\n v2 = 2.0*u2 - 1.0\n\n # define check value\n s = v1**2 + v2**2\n\n # check condition\n if s < 1:\n # define random normal numbers\n rand1 = v1*sqrt((-2.0*log(s))/s)\n\n # append to random array\n rand.append(rand1)\n\n # increment counter\n count = count + 1\n\n return np.array(rand)*std + mean\n#*************************************************************\n\n\n# 3\n# Box-Muller generator\n#*************************************************************\n# NOTE: this generates a random normal distribution\n# on the real line from a random uniform generator\n#-----------------------------------------------------------\n# mean = mean (center of distribution)\n# std = standard deviation (spread of distribution)\n# n = how many random numbers to generate in array\n#-----------------------------------------------------------\ndef randNormal2(mean,std,n):\n # array to store random normal numbers\n rand = []\n\n # get two random uniform numbers to start method\n # NOTE: uses built-in numpy.random.uniform\n u1 = np.random.uniform(0,1,n)\n u2 = np.random.uniform(0,1,n)\n\n # loop to generate random uniform numbers\n # and turn into random normal numbers\n count = 0\n while count < n:\n\n # define random normal numbers\n rand1 = sqrt(-2.0*log(u1[count]))*cos(2*pi*u2[count])\n\n # append to random array\n rand.append(rand1)\n\n # increment counter\n count = count + 1\n\n return np.array(rand)*std + mean\n#*************************************************************","repo_name":"ncrump/PyModules","sub_path":"RandomNumber.py","file_name":"RandomNumber.py","file_ext":"py","file_size_in_byte":3716,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"49"} +{"seq_id":"27771849082","text":"# container_frr\n\n\ndef 
make(rc):\n    rspec = rc.rspec\n\n    frr = rspec[\"frr\"]\n    daemons = [\n        # daemons\n        \"bgpd=yes\",\n        \"ospfd=no\",\n        \"ospf6d=no\",\n        \"ripd=no\",\n        \"ripngd=no\",\n        \"isisd=no\",\n        \"pimd=no\",\n        \"ldpd=no\",\n        \"nhrpd=no\",\n        \"eigrpd=no\",\n        \"babeld=no\",\n        \"sharpd=no\",\n        \"pbrd=no\",\n        \"bfdd=no\",\n        \"fabricd=no\",\n        \"vrrpd=no\",\n        \"pathd=no\",\n        # daemon options\n        \"vtysh_enable=yes\",\n        'zebra_options=\" -A 127.0.0.1 -s 90000000\"',\n        'bgpd_options=\" -A 127.0.0.1\"',\n        'ospfd_options=\" -A 127.0.0.1\"',\n        'ospf6d_options=\" -A ::1\"',\n        'ripd_options=\" -A 127.0.0.1\"',\n        'ripngd_options=\" -A ::1\"',\n        'isisd_options=\" -A 127.0.0.1\"',\n        'pimd_options=\" -A 127.0.0.1\"',\n        'ldpd_options=\" -A 127.0.0.1\"',\n        'nhrpd_options=\" -A 127.0.0.1\"',\n        'eigrpd_options=\" -A 127.0.0.1\"',\n        'babeld_options=\" -A 127.0.0.1\"',\n        'sharpd_options=\" -A 127.0.0.1\"',\n        'pbrd_options=\" -A 127.0.0.1\"',\n        'staticd_options=\"-A 127.0.0.1\"',\n        'bfdd_options=\" -A 127.0.0.1\"',\n        'fabricd_options=\"-A 127.0.0.1\"',\n        'vrrpd_options=\" -A 127.0.0.1\"',\n        'pathd_options=\" -A 127.0.0.1\"',\n    ]\n    frr_daemons_txt = \"\\n\".join(daemons).replace('\"', '\\\\\"')\n    cmds = [\n        f'txt=\"{frr_daemons_txt}\"',\n        'echo \"$txt\" > /etc/frr/daemons',\n        \"sed -i 's/StartLimitInterval=.*/StartLimitInterval=10s/' /usr/lib/systemd/system/frr.service\",\n        \"systemctl daemon-reload\",\n        \"systemctl restart frr\",\n    ]\n    rc.exec(cmds, title=\"frr_daemons\")\n\n    vtysh_cmds = [\n        \"configure terminal\",\n        \"log file /var/log/frr/frr.log\",\n        \"debug bgp keepalives\",\n        \"debug bgp neighbor-events\",\n        \"debug bgp updates\",\n        \"ip forwarding\",\n        \"ipv6 forwarding\",\n        \"!\",\n    ]\n\n    # define the prefix_lists and route_maps\n    for name, route_map in frr.get(\"route_map\", {}).items():\n        for prefix in route_map.get(\"prefix_list\", []):\n            vtysh_cmds += [f\"ip prefix-list {name} {prefix}\"]\n        vtysh_cmds += [f\"route-map {name} {route_map['policy']} {route_map['order']}\"]\n        if len(route_map.get(\"prefix_list\", [])) > 0:\n            if route_map[\"version\"] == 4:\n                vtysh_cmds += [f\"match ip address prefix-list {name}\"]\n            elif route_map[\"version\"] == 0:\n                vtysh_cmds += [f\"match ipv6 address prefix-list {name}\"]\n        vtysh_cmds += [\"exit\"]\n\n    # setup interfaces\n    for link in rspec.get(\"links\", []):\n        if \"bgp_peer_group\" in link:\n            vtysh_cmds += [\n                f\"interface {link['link_name']}\",\n                \"ipv6 nd ra-interval 10\",\n                \"no ipv6 nd suppress-ra\",\n                \"!\",\n            ]\n        for vlan_id, vlan in link.get(\"vlan_map\", {}).items():\n            if \"bgp_peer_group\" in vlan:\n                vtysh_cmds += [\n                    f\"interface {link['link_name']}.{vlan_id}\",\n                    \"ipv6 nd ra-interval 10\",\n                    \"no ipv6 nd suppress-ra\",\n                    \"!\",\n                ]\n    for link in rspec.get(\"_links\", []):\n        if \"bgp_peer_group\" in link:\n            vtysh_cmds += [\n                f\"interface {link['peer_name']}\",\n                \"ipv6 nd ra-interval 10\",\n                \"no ipv6 nd suppress-ra\",\n                \"!\",\n            ]\n        for vlan_id, vlan in link.get(\"vlan_map\", {}).items():\n            if \"bgp_peer_group\" in vlan:\n                if \"peer_ovs\" in vlan:\n                    vtysh_cmds += [\n                        f\"interface {vlan['peer_ovs']['peer_name']}\",\n                        \"ipv6 nd ra-interval 10\",\n                        \"no ipv6 nd suppress-ra\",\n                        \"!\",\n                    ]\n                else:\n                    vtysh_cmds += [\n                        f\"interface {link['peer_name']}.{vlan_id}\",\n                        \"ipv6 nd ra-interval 10\",\n                        \"no ipv6 nd suppress-ra\",\n                        \"!\",\n                    ]\n\n    # setup bgp\n    vtysh_cmds += [\n        f\"router bgp {frr['asn']}\",\n        f\"bgp router-id {frr['id']}\",\n        \"bgp bestpath as-path multipath-relax\",\n        \"no bgp ebgp-requires-policy\",\n        \"no bgp network import-check\", # stop checking whether the network exists in the RIB\n    ]\n\n    for 
bgp_peer_group in frr.get(\"bgp_peer_groups\", []):\n        vtysh_cmds += [\n            f\"neighbor {bgp_peer_group['name']} peer-group\",\n            f\"neighbor {bgp_peer_group['name']} remote-as external\",\n            f\"neighbor {bgp_peer_group['name']} capability extended-nexthop\",\n        ]\n    for link in rspec.get(\"links\", []):\n        if \"bgp_peer_group\" in link:\n            vtysh_cmds += [f\"neighbor {link['link_name']} interface peer-group {link['bgp_peer_group']}\"]\n        for vlan_id, vlan in link.get(\"vlan_map\", {}).items():\n            if \"bgp_peer_group\" in vlan:\n                vtysh_cmds += [f\"neighbor {link['link_name']}.{vlan_id} interface peer-group {vlan['bgp_peer_group']}\"]\n    for link in rspec.get(\"_links\", []):\n        if \"bgp_peer_group\" in link:\n            vtysh_cmds += [f\"neighbor {link['peer_name']} interface peer-group {link['bgp_peer_group']}\"]\n        for vlan_id, vlan in link.get(\"vlan_map\", {}).items():\n            if \"bgp_peer_group\" in vlan:\n                if \"peer_ovs\" in vlan:\n                    vtysh_cmds += [f\"neighbor {vlan['peer_ovs']['peer_name']} interface peer-group {vlan['bgp_peer_group']}\"]\n                else:\n                    vtysh_cmds += [f\"neighbor {link['peer_name']}.{vlan_id} interface peer-group {vlan['bgp_peer_group']}\"]\n\n    vtysh_cmds += [\"address-family ipv4 unicast\"]\n    for network in frr.get(\"ipv4_networks\", []):\n        vtysh_cmds += [f\"network {network}\"]\n    for bgp_peer_group in frr.get(\"bgp_peer_groups\", []):\n        if \"route_map_v4_in\" in bgp_peer_group:\n            vtysh_cmds += [f\"neighbor {bgp_peer_group['name']} route-map {bgp_peer_group['route_map_v4_in']} in\"]\n        if \"route_map_v4_out\" in bgp_peer_group:\n            vtysh_cmds += [f\"neighbor {bgp_peer_group['name']} route-map {bgp_peer_group['route_map_v4_out']} out\"]\n    vtysh_cmds += [\"exit-address-family\"]\n\n    vtysh_cmds += [\"address-family ipv6 unicast\"]\n    for network in frr.get(\"ipv6_networks\", []):\n        vtysh_cmds += [f\"network {network}\"]\n    for bgp_peer_group in frr.get(\"bgp_peer_groups\", []):\n        if \"route_map_v6_in\" in bgp_peer_group:\n            vtysh_cmds += [f\"neighbor {bgp_peer_group['name']} route-map {bgp_peer_group['route_map_v6_in']} in\"]\n        if \"route_map_v6_out\" in bgp_peer_group:\n            vtysh_cmds += [f\"neighbor {bgp_peer_group['name']} route-map {bgp_peer_group['route_map_v6_out']} out\"]\n    vtysh_cmds += [\"exit-address-family\"]\n\n    vtysh_cmds += [\n        \"!\",\n        \"line vty\",\n        \"!\",\n    ]\n    vtysh_cmds_str = \"\\n\".join(vtysh_cmds)\n    cmds = [f'vtysh -c \"{vtysh_cmds_str}\"']\n    rc.exec(cmds, title=\"frr_vtysh\")\n","repo_name":"syunkitada/labo","sub_path":"fabfile/lib/node_utils/container_frr.py","file_name":"container_frr.py","file_ext":"py","file_size_in_byte":7106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"74354232148","text":"import os\nimport webapp2\nfrom google.appengine.ext import ndb\nfrom secrets import secrets\nfrom User.handlers import AuthHandler\nfrom MemeCreator.storeimage import UserMemeDb\nUSER_MEME_DB_NAME = 'user_meme_db'\n\ndef user_meme_dbkey(meme_userdbname=USER_MEME_DB_NAME):\n    \"\"\"Constructs a Datastore key for a UserMemeDb entity with meme_userdbname.\"\"\"\n    return ndb.Key('user_meme_db', meme_userdbname)\nclass BaseRequestHandler(webapp2.RequestHandler):\n    def get(self):\n        self.response.write(\"theju%s\" % self.request) \n        \nclass MigrationOH(AuthHandler):\n    def get(self):\n        sadmin = secrets.CheckSuperAdminRights(self.user_id) \n        if sadmin == True:\n            u_q = UserMemeDb.query(ancestor=user_meme_dbkey(USER_MEME_DB_NAME)).order(-UserMemeDb.date)\n            memes = u_q.fetch()\n            for meme in memes:\n                meme.mode = 'gallery'\n                meme.put()\n            
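# note: ndb.put_multi(memes) could batch these writes instead of one datastore RPC per entity\n            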
self.response.write('done')\n else:\n self.response.write('Not allowed')\n \n#https://www.facebook.com/dialog/oauth?client_id=xxxxxxx&redirect_uri=http://smashed.in/gettoken&scope=manage_pages\n\n#https://graph.facebook.com/oauth/access_token?client_id=xxxxxx&redirect_uri=http://smashed.in/gettoken&client_secret=xxxxxx&code=AQAPqKtA27TCQCtw8qt7aPxLK9RgiHeMif6pZr5Ww8zYVTr2trcK0jD38FN0G5QFZQ5vo4BVqrtCGPVAqFujUrXt5mCqNUHhXtvk9xN5f5ZJy6c2ffiJeWvt8a9bUz3zzd6zfgjn_XvqaZK7L1nLhXQVHfi9aoL5VH4AkNH5tolquCV5Vlf7sVsRJnQVtl3cqtrK0SXvofUZFuagB8bHzc5tiBlP0EaRkWyOzTF3r44CKpKEB7PG6cYWi71oqYm7jxWO1byFIe3QLYJN74NBJNgNXBoqSuuCuuS8QwjjNSTqjfoBcF39p-4DQ3_EakxZcQA\n\n#https://graph.facebook.com/smashed.in.7/accounts","repo_name":"thejasvibhat/smashed","sub_path":"checktoken.py","file_name":"checktoken.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"34940308892","text":"#https://www.acmicpc.net/problem/10025\n\nn,k=map(int,input().split())\nposition=[0]\ninputArray=[0 for i in range(4000001)]\n\nsum=0\nans=0\nfor i in range(n):\n g,x=map(int,input().split())\n position.append(x)\n inputArray[x]=g\n\nk=2*k+1\nfor i in range(max(position)+1):\n if i>=k:\n sum-=inputArray[i-k]\n sum+=inputArray[i]\n ans=max(sum,ans)\n\nprint(ans)\n","repo_name":"Chaeros/BeakJoon","sub_path":"10025번 게으른 백곰(S3).py","file_name":"10025번 게으른 백곰(S3).py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"36716949122","text":"import math\n\ndef bucketSort(nums):\n numberOfBuckets = round(math.sqrt(len(nums)))\n maxValue = max(nums)\n temp = []\n\n for bucket in range(numberOfBuckets):\n temp.append([])\n \n for num in nums:\n index_b = math.ceil(num*numberOfBuckets/maxValue)\n temp[index_b-1].append(num)\n\n for bucket in temp:\n bucket.sort()\n\n k = 0\n for i in range(numberOfBuckets):\n for j in range(len(temp[i])):\n nums[k] = temp[i][j]\n k += 1\n \n return nums\n\nnums = [1, 7,2,5,3,90,10]\n\nprint(bucketSort(nums))\n \n\n","repo_name":"sarvar2003/Algorithms","sub_path":"Sorting/bucketSort.py","file_name":"bucketSort.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"49"} +{"seq_id":"10015904129","text":"import psutil\n\nimport time\n\nfrom datetime import datetime\n\ncurrentAvailableRam_GB = 15.9\n\nprint(psutil.swap_memory().percent)\nprint(psutil.virtual_memory().percent)\nprint(psutil.virtual_memory())\n\npercentInt = psutil.virtual_memory().percent\n\nmemoryUsed_GB = ( percentInt / 100 ) * currentAvailableRam_GB\n\nmemoryLeft_GB = float(currentAvailableRam_GB - memoryUsed_GB)\n# for testing\n# memoryLeft_GB = 3.\n\nprint(\"memoryLeft_GB\",memoryLeft_GB)\nprint(type(memoryLeft_GB))\n\n\nwhile True:\n now = datetime.now()\n\n current_time = now.strftime(\"%H:%M:%S\")\n print(\"Current Time =\", current_time)\n time.sleep(1)\n\n if(memoryLeft_GB < 4.):\n print(\"kill the ml process\")\n PROCNAME = \"python.exe\"\n for proc in psutil.process_iter():\n # print(proc)\n if proc.name() == PROCNAME:\n print(proc)\n pid = proc.pid\n p = psutil.Process(pid)\n print(proc.memory_info()[0])\n how_much_memory_is_used = proc.memory_info()[0]\n if(proc.memory_info()[0] > 30 * 1000 * 1000):\n print(\"about to kill the ml process\")\n p.terminate() # or p.kill()\n\n\n else:\n print(\"nothing to kill just yet\")\n # nothing to kill just yet\n\n\n\n\n# 
virtual_memory\n# print()","repo_name":"gg64du02/dc-tts-and_dc-tts-transfer-learning-bns","sub_path":"swap_prevention/swap_prevention.py","file_name":"swap_prevention.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"70795205590","text":"from nicegui import ui\nfrom niceguiToolkit.layout.webui.functionalSet.model import T_BuilderContext, T_Builder\nfrom niceguiToolkit.layout.webui.functionalSet.functionals import (\n components as common_cps,\n)\n\n\ndef isShow_fn(context: T_BuilderContext):\n return True\n\n\ndef build_font_size(context: T_BuilderContext):\n style_name = \"font-size\"\n\n def onchange(e):\n value = e.value\n if value:\n context.apply_styles({style_name: f\"{e.value}rem\"})\n else:\n context.remove_styles(style_name)\n\n init_value = None\n init_value_str = context.element._style.get(style_name, None)\n if init_value_str:\n init_value = float(init_value_str[:-3])\n\n with ui.row().classes(\"flex-center\"):\n ui.number(\n \"字体大小\", min=0, value=init_value, step=0.1, on_change=onchange\n ).classes(\"flex\")\n\n\ndef build_font_color(context: T_BuilderContext):\n style_name = \"color\"\n inin_value = context.element._style.get(style_name, \"\")\n\n def onchange(e):\n value = e.value\n if value == \"\":\n context.remove_styles(style_name)\n return\n\n context.apply_styles({style_name: e.value})\n\n with ui.row().classes(\"flex-center\"):\n common_cps.color_input(\"字体颜色\", value=inin_value, on_change=onchange)\n\n\ndef build_fn(context: T_BuilderContext):\n build_font_size(context)\n ui.separator()\n build_font_color(context)\n\n\ndef _get_builder():\n return T_Builder(\n title=\"decorate(修饰)\", order=10, is_show_fn=isShow_fn, build_fn=build_fn\n )\n","repo_name":"CrystalWindSnake/nicegui-toolkit","sub_path":"niceguiToolkit/layout/webui/functionalSet/functionals/decorate.py","file_name":"decorate.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"49"} +{"seq_id":"72868874068","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def helper(self, root, current_depth, v, d):\n if not root:\n return\n \n if current_depth == d - 1:\n node1 = TreeNode(v)\n if root:\n node1.left = root.left\n root.left = node1\n \n node2 = TreeNode(v)\n if root:\n node2.right = root.right\n root.right = node2\n else:\n self.helper(root.right, 1+current_depth, v, d)\n self.helper(root.left, 1+current_depth, v, d)\n \n \n def addOneRow(self, root, v, d):\n \"\"\"\n :type root: TreeNode\n :type v: int\n :type d: int\n :rtype: TreeNode\n \"\"\"\n if d == 1:\n node = TreeNode(v)\n node.left = root\n return node\n \n self.helper(root, 1, v, d)\n return root\n \n","repo_name":"jasujaayush/Random","sub_path":"leetcode/add-one-row-to-tree.py","file_name":"add-one-row-to-tree.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"70960091668","text":"import struct\nfrom nonblock import nonblock_read\nimport time\nfrom gpiozero import LED \nREWARD_DURATION = 0.5\n\nclass Mouse:\n def __init__(self,device_name = 'mouse0'):\n self.path = '/dev/input/' + device_name\n self.file = open(self.path,'rb')\n self.x = 0 \n self.y = 0\n\n def position(self):\n data = nonblock_read(self.file)\n l = 
int(len(data) / 3)\n for i in range(l):\n dx,dy = struct.unpack('bb',data[1+i*3:3+i*3])\n self.x += dx\n self.y += dy\n print(self.x, self.y)\n return self.x, self.y\n\ndef in_window(tStart,tCur, dur):\n timePassed = tCur - tStart\n if timePassed >= 0 and timePassed <= dur: return True\n else: return False\n\nclass GPIO:\n def __init__(self):\n self.actor = LED(21)\n self.t = -100\n \n def setReward(self):\n self.t = time.time()\n \n def writePins(self):\n if in_window(self.t, time.time(), REWARD_DURATION): \n self.actor.on()\n else:\n self.actor.off()\n\nif __name__ == '__main__':\n M0 = Mouse('mouse0')\n M1 = Mouse('mouse1')\n lt = time.time()\n\n while True:\n x0,y0 = M0.position()\n x1,y1 = M1.position()\n print('M0: ',(x0,y0), 'M1:', (x1,y1))\n time.sleep(0.1)\n lt = time.time()\n\n\n ","repo_name":"Crazyonxh/mouseVR","sub_path":"Pi/hardware.py","file_name":"hardware.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"42522090023","text":"# import cv2\r\n# import numpy as np\r\n# import tesserocr as tr\r\n# from PIL import Image\r\n\r\n\r\n# image = cv2.imread(\"testout.png\")\r\n\r\n# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n# cv2.imshow('gray', image)\r\n\r\n# thresh = cv2.adaptiveThreshold(gray, 250, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 255, 1)\r\n# cv2.imshow('thresh', thresh)\r\n\r\n# kernel = np.ones((1, 1), np.uint8)\r\n# img_dilation = cv2.dilate(thresh, kernel, iterations=1)\r\n\r\n# ctrs, hier = cv2.findContours(img_dilation.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n# sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0])\r\n\r\n# clean_plate = 255 * np.ones_like(img_dilation)\r\n\r\n# for i, ctr in enumerate(sorted_ctrs):\r\n# x, y, w, h = cv2.boundingRect(ctr)\r\n\r\n# roi = img_dilation[y:y + h, x:x + w]\r\n\r\n# # these are very specific values made for this image only - it's not a factotum code\r\n# if h > 10 and w > 10:\r\n# rect = cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n# clean_plate[y:y + h, x:x + w] = roi\r\n# cv2.imshow('ROI', rect)\r\n# cv2.imwrite('roi.png', roi)\r\n\r\n# img = cv2.imread(\"roi.png\")\r\n\r\n# blur = cv2.medianBlur(img, 1)\r\n# cv2.imshow('4 - blur', img) \r\n# cv2.waitKey(0)\r\n# cv2.destroyAllWindows() \r\n\r\n# pil_img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\r\n\r\n# api = tr.PyTessBaseAPI()\r\n\r\n# try:\r\n# api.SetImage(pil_img)\r\n# boxes = api.GetComponentImages(tr.RIL.TEXTLINE, True)\r\n# text = api.GetUTF8Text()\r\n\r\n# finally:\r\n# api.End()\r\n\r\n# # clean the string a bit\r\n# text = str(text).strip()\r\n\r\n# print(text)\r\n# cv2.waitKey(0)\r\n\r\n\r\n\r\ndef testing():\r\n import numpy as np\r\n import cv2\r\n\r\n image = cv2.imread(\"testout.png\")\r\n image = cv2.resize(image, (400, 100), interpolation = cv2.INTER_AREA)\r\n grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n rows,cols = grayImage.shape\r\n for i in range(rows):\r\n for j in range(cols):\r\n if grayImage[i,j] > 50:\r\n image[i,j] = 255\r\n else:\r\n image[i,j] = 0\r\n kernel = np.ones((5,5), np.uint8)\r\n kernel2 = np.ones((3,3), np.uint8)\r\n\r\n image = cv2.erode(image, kernel, iterations=1) \r\n image = cv2.dilate(image, kernel2, iterations=1) \r\n\r\n grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n dst = cv2.Canny(grayImage, 50, 100)\r\n \r\n #ret, bw_img = cv2.threshold(dst,127,255,cv2.THRESH_BINARY)\r\n\r\n #dst = cv2.morphologyEx(grayImage, 
cv2.MORPH_RECT, np.zeros((5,5), np.uint8),iterations=1)\r\n contours, heirarchy = cv2.findContours(dst, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\r\n\r\n for i in range(0, len(contours)):\r\n print(cv2.contourArea(contours[i]))\r\n if cv2.contourArea(contours[i]) > 100:\r\n x,y,w,h = cv2.boundingRect(contours[i])\r\n # The w constrain to remove the vertical lines\r\n if w > 10:\r\n cv2.rectangle(image, (x, y), (x+w, y+h), (0, 0, 255), 1)\r\n cv2.imwrite(\"contour.jpg\", image)\r\n\r\n cv2.imshow('sample',image)\r\n cv2.imshow('grayImage',grayImage)\r\n cv2.imshow('dst',dst)\r\n \r\n\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\ntesting()","repo_name":"HashiniKumanayake468/RealTimeParkingSystem-Detect-ReadtheVehicleLicensePlate","sub_path":"updated-number-plate-reader.py","file_name":"updated-number-plate-reader.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"28726591805","text":"from django.urls import path\nfrom .views import Index\nfrom . import views\n\napp_name = \"news_board\"\n\nurlpatterns = [\n path(\"\", Index.as_view(), name=\"index\"),\n path(\"api/read\", views.read_news, name=\"read_all\"),\n path(\"api/read/\", views.detail_news, name=\"read\"),\n path(\"api/update/\", views.update_news, name=\"update\"),\n path(\"api/delete/\", views.delete_news, name=\"delete\"),\n path(\"api/create\", views.create_news, name=\"create\"),\n path(\n \"api/read//comments\",\n views.read_comments,\n name=\"read_commnets\",\n ),\n path(\n \"api/read//comments/create\",\n views.create_comment,\n name=\"create_commnet\",\n ),\n path(\n \"api/read//comments/\",\n views.read_one_comment,\n name=\"read_one_commnet\",\n ),\n path(\n \"api/read//comments//update\",\n views.update_comment,\n name=\"update_commnet\",\n ),\n path(\n \"api/read//comments//delete\",\n views.delete_comment,\n name=\"delete_commnet\",\n ),\n path(\"api/read//upvote\", views.upvote, name=\"upvote\"),\n]\n","repo_name":"renmarin/News-Board-API","sub_path":"news_board/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"9403802700","text":"import spacy\nnlp = spacy.load('en_core_web_sm')\n\nprint(\"------------------- example 1 -------------------\")\nexampletokens = nlp('cat apple monkey banana')\nfor token1 in exampletokens:\n for token2 in exampletokens:\n print(token1.text, token2.text, token1.similarity(token2))\n\n# it is interesting that the similarity function recognises that cat and monkey are similar as both are animals.\n# it is more interesting that it recognises that monkey and banana are similar as monkeys like bananas!\n\nprint(\"------------------- my words -------------------\")\nmytokens = nlp('ford honda river desert')\nfor token_a in mytokens:\n for token_b in mytokens:\n print(token_a.text, token_b.text, token_a.similarity(token_b))\n\n# I was surprised that this didn't return a stronger relation between river and ford or ford and honda\n# as ford and honda are both vehicle companies, and ford is also a name for a river crossing\n# I expected that these would have shown a higher similarity, but maybe I was too obscure with my connections\n# also, surprisingly, I received a negative relation between many of the words and I do not know how this is possible?\n\n# when running the script through the simpler SM language model, I received the below error message\n# 
however, I also received similarity scores for my words more in line with my expectations from the start.\n# This showed the connection between honda and ford and ford and river that I was expecting.\n\n#UserWarning: [W007]\n# The model you're using has no word vectors loaded,\n# so the result of the Token.similarity method will be based on the tagger, parser and NER,\n# which may not give useful similarity judgements.\n# This may happen if you're using one of the small models, e.g. `en_core_web_sm`,\n# which don't ship with word vectors and only use context-sensitive tensors.\n# You can always add your own word vectors, or use one of the larger models instead if available.","repo_name":"DanWrightNE/Task38-Semantic_Similarity","sub_path":"semantic.py","file_name":"semantic.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"15921836725","text":"import ipinfo\nimport sys\n\n# get the ip address from command line\n\ntry:\n ip_address = sys.argv[1]\nexcept IndexError:\n ip_address = None\n\n# access token for ipinfo.io\naccess_token = 'Insert your ipinfo.io token here'\n\n#Create a client object with token\nhandler = ipinfo.getHandler(access_token)\n\n# Get the IP info\ndetails = handler.getDetails(ip_address)\n\n# Print the Information we found\n\nfor key, value in details.all.items():\n print(f\"{key}: {value}\")\n\n","repo_name":"seqwith/Python_Projects","sub_path":"getIPInfo.py","file_name":"getIPInfo.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"24225861343","text":"from aiogram import Bot, types\nfrom aiogram.dispatcher import Dispatcher\nfrom os import getenv\nfrom json import load\nfrom tools.mongodb_tools import DataBase\nfrom userbot.userbot import check_session\n\nBOT_ID = getenv('BOT_ID')\nbot = Bot(token=getenv('TOKEN'))\ndp = Dispatcher(bot)\ndb = DataBase()\n\nasync def on_startup(_):\n general_check()\n print('[BOT] Bot has been started.')\n\ndef general_check():\n if db.check_connections():\n print('[DATABASE] Connected successfully.')\n else:\n print('[DATABASE] Connection is failed.')\n if check_session():\n print('[USERBOT] Userbot is working')\n else:\n print('[USERBOT] Userbot is not working')\n\ndef read_config():\n with open('E:\\\\Programming\\\\Python\\\\PyCharmProjects\\\\Downloader_BOT\\\\config.json', 'r', encoding='utf-8') as f:\n return load(f)\n\nasync def is_url(message: types.Message):\n try:\n message['entities'][0]['type']\n except IndexError:\n return False\n else:\n return True","repo_name":"AtecAtca/yt_downloader_bot","sub_path":"tools/bot_tools.py","file_name":"bot_tools.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"53511582137","text":"\nimport sys\nfor line in sys.stdin:\n line = line.strip()\n splits = line.split(\",\")\n if len(splits) == 3:\n citing = splits[1]\n cited_and_state = splits[0] + ',' +splits[2]\n print('%s\\t%s' %(citing,cited_and_state))\n\n else:\n patent_id = splits[0]\n location = splits[5]\n extra_field = splits[0]\n print('%s\\t%s\\t%s' 
%(patent_id,location,extra_field))\n\n\n\n\n\n","repo_name":"Pradyoth/CSCI-5253-Datacenter-Scale-Computing-Assignments","sub_path":"lab3-hadoop-join-patent-Pradyoth-master/mapper2.py","file_name":"mapper2.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"49"} +{"seq_id":"18295772580","text":"# Taken and modified from:\n# https://gist.githubusercontent.com/sjljrvis/0aa964f44fa81b2790b0887e21ae29aa/raw/033efe1936b975931ce7b8e815234378fb50bed3/classifier.py\n\nimport nltk\nfrom nltk.stem.lancaster import LancasterStemmer\nfrom nltk import word_tokenize,pos_tag\nimport os\nimport json\nimport datetime\nimport csv\nimport numpy as np\nimport time\n\nclass ClassifyIntent:\n\n stemmer = LancasterStemmer()\n\n # 3 classes of training data\n training_data = []\n\n words=[]\n classes=[]\n documents=[]\n\n ERROR_THRESHOLD = 0.2\n\n def __init__(self, dataset_file):\n with open(dataset_file, newline='') as f:\n r = csv.reader(f)\n for a in r:\n the_set = ()\n the_set = {\"class\": a[0], \"sentence\": a[1]}\n self.training_data.append(the_set)\n\n #compute sigmoid nonlinearity\n def __sigmoid(self, x):\n output=1/(1+np.exp(-x))\n return output\n\n #convert output of sigmoid function to its derivative\n def __sigmoid_output_to_derivative(self, output):\n return output*(1-output)\n\n def __clean_up_sentence(self, sentence):\n\n #tokenize the pattern\n sentence_words = nltk.word_tokenize(sentence)\n #stem each word\n sentence_words=[self.stemmer.stem(word.lower()) for word in sentence_words]\n return sentence_words\n\n #return bag of words array: 0 or 1 for each word in the bag that exists in the sentence\n def __bow(self, sentence, the_words, show_details=False):\n #tokenize the pattern\n sentence_words = self.__clean_up_sentence(sentence)\n #bag of words\n bag=[0]*len(the_words)\n for s in sentence_words:\n for i,w in enumerate(the_words):\n if w == s:\n bag[i] = 1\n if show_details:\n print (\"found in bag: %s\" % w)\n return(np.array(bag))\n\n\n def get_training_data(self):\n\n return self.training_data\n\n def preprocess_training_data(self):\n\n #organizing our data structures for documents , classes, words\n\n ignore_words=['?']\n\n #loop through each sentence in our training data\n for pattern in self.training_data:\n #tokenize in each word in the sentence\n w=nltk.word_tokenize(pattern['sentence'])\n #add to our words list\n self.words.extend(w)\n #add to documents in our corpus\n self.documents.append((w,pattern['class']))\n #add to our classes list\n if pattern['class'] not in self.classes:\n self.classes.append(pattern['class'])\n\n #stem and lower each word and remove duplicate\n self.words=[self.stemmer.stem(w.lower()) for w in self.words if w not in ignore_words]\n self.words=list(set(self.words))\n\n #remove duplicates\n classes=list(set(self.classes))\n\n print(len(self.documents),\" documents\")\n print(len(self.classes), \" classes\", self.classes)\n print(len(self.words),\" unique stemmed words\", self.words)\n\n #create our training data\n training=[]\n output=[]\n #create an empty array for our output\n output_empty=[0]*len(self.classes)\n\n #training set, bag of words for each sentence\n for doc in self.documents:\n #initialize our bag of words\n bag=[]\n #list of tokenized words for the pattern\n pattern_words=doc[0]\n #stem each word\n pattern_words=[self.stemmer.stem(word.lower()) for word in pattern_words]\n #create our bag of words array\n for w in self.words:\n bag.append(1) if w in 
pattern_words else bag.append(0)\n training.append(bag)\n #output is a 0 for each tag and 1 for current tag\n output_row=list(output_empty)\n output_row[self.classes.index(doc[1])] = 1\n output.append(output_row)\n\n X_rop = np.array(training) # rop: result of preprocess\n y_rop = np.array(output)\n\n return X_rop, y_rop\n\n def train(self, X, y, hidden_neurons=10, alpha=1, epochs=50000, dropout=False, dropout_percent=0.5):\n\n \"\"\"\n synapse_file = 'intent_class.json'\n\n if os.path.exists(synapse_file):\n # load our calculated synapse values\n print(\"training results exist, no need to train anymore\")\n with open(synapse_file) as data_file:\n synapse = json.load(data_file)\n synapse_0 = np.asarray(synapse['synapse0'])\n synapse_1 = np.asarray(synapse['synapse1'])\n return synapse_0, synapse_1\n \"\"\"\n\n print (\"Training with %s neurons, alpha:%s, dropout:%s %s\" % (hidden_neurons, str(alpha), dropout, dropout_percent if dropout else '') )\n print (\"Input matrix: %sx%s Output matrix: %sx%s\" % (len(X),len(X[0]),1, len(self.classes)) )\n np.random.seed(1)\n\n last_mean_error = 1\n # randomly initialize our weights with mean 0\n synapse_0 = 2*np.random.random((len(X[0]), hidden_neurons)) - 1\n synapse_1 = 2*np.random.random((hidden_neurons, len(self.classes))) - 1\n\n prev_synapse_0_weight_update = np.zeros_like(synapse_0)\n prev_synapse_1_weight_update = np.zeros_like(synapse_1)\n\n synapse_0_direction_count = np.zeros_like(synapse_0)\n synapse_1_direction_count = np.zeros_like(synapse_1)\n\n for j in iter(range(epochs+1)):\n\n # Feed forward through layers 0, 1, and 2\n layer_0 = X\n layer_1 = self.__sigmoid(np.dot(layer_0, synapse_0))\n\n if(dropout):\n layer_1 *= np.random.binomial([np.ones((len(X),hidden_neurons))],1-dropout_percent)[0] * (1.0/(1-dropout_percent))\n\n layer_2 = self.__sigmoid(np.dot(layer_1, synapse_1))\n\n # how much did we miss the target value?\n layer_2_error = y - layer_2\n\n if (j% 10000) == 0 and j > 5000:\n # if this 10k iteration's error is greater than the last iteration, break out\n if np.mean(np.abs(layer_2_error)) < last_mean_error:\n print (\"delta after \"+str(j)+\" iterations:\" + str(np.mean(np.abs(layer_2_error))) )\n last_mean_error = np.mean(np.abs(layer_2_error))\n else:\n print (\"break:\", np.mean(np.abs(layer_2_error)), \">\", last_mean_error )\n break\n\n # in what direction is the target value?\n # were we really sure? if so, don't change too much.\n layer_2_delta = layer_2_error * self.__sigmoid_output_to_derivative(layer_2)\n\n # how much did each l1 value contribute to the l2 error (according to the weights)?\n layer_1_error = layer_2_delta.dot(synapse_1.T)\n\n # in what direction is the target l1?\n # were we really sure? 
if so, don't change too much.\n layer_1_delta = layer_1_error * self.__sigmoid_output_to_derivative(layer_1)\n\n synapse_1_weight_update = (layer_1.T.dot(layer_2_delta))\n synapse_0_weight_update = (layer_0.T.dot(layer_1_delta))\n\n if(j > 0):\n synapse_0_direction_count += np.abs(((synapse_0_weight_update > 0)+0) - ((prev_synapse_0_weight_update > 0) + 0))\n synapse_1_direction_count += np.abs(((synapse_1_weight_update > 0)+0) - ((prev_synapse_1_weight_update > 0) + 0))\n\n synapse_1 += alpha * synapse_1_weight_update\n synapse_0 += alpha * synapse_0_weight_update\n\n prev_synapse_0_weight_update = synapse_0_weight_update\n prev_synapse_1_weight_update = synapse_1_weight_update\n \"\"\"\n now = datetime.datetime.now()\n\n # persist synapses\n synapse = { 'synapse0': synapse_0.tolist(), 'synapse1': synapse_1.tolist(),\n 'datetime': now.strftime(\"%Y-%m-%d %H:%M\"),\n 'words': self.words,\n 'classes': self.classes\n }\n\n with open(synapse_file, 'w') as outfile:\n json.dump(synapse, outfile, indent=4, sort_keys=True)\n print (\"saved synapses to:\", synapse_file)\n \"\"\"\n return synapse_0, synapse_1\n\n def classify(self, sentence, syn_0, syn_1, show_details=False):\n\n x = self.__bow(sentence.lower(), self.words, show_details)\n if show_details:\n print(\"sentence:\", sentence, \"\\n bow:\", x)\n #input layer is our bag of words\n l0=x\n # matrix multiplication of input and hidden layer\n l1 = self.__sigmoid(np.dot(l0, syn_0))\n # output layer\n l2 = self.__sigmoid(np.dot(l1, syn_1))\n\n results = l2\n results = [[i,r] for i,r in enumerate(results) if r > self.ERROR_THRESHOLD ]\n results.sort(key=lambda x: x[1], reverse=True)\n return_results =[[self.classes[r[0]],r[1]] for r in results]\n #print (\"%s \\n classification: %s\" % (sentence, return_results))\n\n # example of return_results:\n # [['greetings', 0.9971946847650717]]\n return return_results\n","repo_name":"zestium/konversa","sub_path":"src/konversa/helpers/nlu/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":9138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34529330960","text":"from kombu.common import Broadcast\n#broker_url = 'amqp://guest@104.199.213.33:5673/'\n#broker_url ='redis://104.199.213.33:6378/0'\n#broker_url ='redis://redis:6379/0'\n#result_backend ='redis://redis:6379/0'\n\nbroker_url = 'amqp://guest@rabbitmq'\n#result_backend = 'redis://:letronlgv2@redis:6379/1'\n\nignore_result=True\ntask_serializer = 'json'\ntask_acks_late = True\ntimezone = 'Asia/Taipei'\nworker_concurrency = 10\n\n#task_queues = (Broadcast('monitor_task'),)\n#task_routes ={\n# 'worker.worker.task_add': {\n# 'queue': 'monitor_task',\n# 'exchange': 'monitor_task'\n# }\n#}","repo_name":"m94221006/portal_prod","sub_path":"backend/worker/celeryconfig.py","file_name":"celeryconfig.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36340037115","text":"import glob\n\nfrom setuptools import setup\n\n\ndef findfiles(pat):\n return [x for x in glob.glob('share/' + pat)]\n\n\ndata_files = [\n]\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n# print \"data_files = %s\" % data_files\n\nsetup(\n name='pyqsp',\n version='0.1.6',\n author='I. Chuang, J. Docter, J.M. Martyn, Z. Rossi, A. 
Tan',\n    author_email='ichuang@mit.edu',\n    packages=['pyqsp', 'pyqsp.test', 'pyqsp.qsp_models'],\n    scripts=[],\n    url='https://github.com/ichuang/pyqsp',\n    license='LICENSE.txt',\n    description='Generate phase angles for quantum signal processing algorithms',\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    include_package_data=True,\n    entry_points={\n        'console_scripts': [\n            'pyqsp = pyqsp.main:CommandLine',\n        ],\n    },\n    install_requires=['matplotlib',\n                      'numpy>1.19.1',\n                      'scipy',\n                      ],\n    package_dir={'pyqsp': 'pyqsp'},\n    test_suite=\"pyqsp.test\",\n)\n","repo_name":"kyledebry/pyqsp","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"3413829265","text":"# Write a class and implement its methods (based on homework #11):\n#\n# 1. Initialize the class with a single parameter - a directory name.\n#\n# 2. Write an instance method that creates an instance attribute in the form of a dictionary\n# {'filenames': [list of files in the folder], 'dirnames': [list of all subfolders in the folder]}.\n# Only take subfolders of the first nesting level. A folder inside a folder inside a folder - skip those ))\n#\n# 3. Write an instance method that takes a boolean value (True/False).\n# The method returns the same dictionary, but with the file and folder names sorted in their respective lists.\n# True means alphabetical sort order, False - reverse order.\n#\n# 4. Write an instance method that takes a string, which can be either a file name\n# or a folder name. (A file name must contain a dot.)\n# Depending on what the method received (a file name or a folder name) - append it to the matching list\n# and return the updated dictionary.\n#\n# 5* (*optional to submit, but if you do submit it, mistakes will be counted as well).\n# Write an instance method that takes a directory name.\n# The method checks that the stored dictionary matches the real file system in that folder and,\n# if needed, creates the required folders and empty files according to the structure of the dictionary.\nimport os\n\n\nclass WorkWithFolders:\n    def __init__(self, dirname_exist: str, dirname_new: str, file_dir_name: str, sort_rev: bool = True):\n        self.dirname_exist = dirname_exist\n        self.sort_rev = sort_rev\n        self.file_dir_name = file_dir_name\n        self.dict_dirs_files = self.create_dict_lists()\n        self.dirname_new = dirname_new\n\n    def create_dict_lists(self):\n        list_dirs_files = os.listdir(self.dirname_exist)\n        dict_dirs_files = {'filenames': [],\n                           'dirnames': [],\n                           }\n\n        for obj in list_dirs_files:\n            dir_path = os.path.join(self.dirname_exist, obj)\n            if os.path.isfile(dir_path):\n                dict_dirs_files['filenames'].append(obj)\n            elif os.path.isdir(dir_path):\n                dict_dirs_files['dirnames'].append(obj)\n        return dict_dirs_files\n\n    def sort_dict_lists(self):\n        self.dict_dirs_files['filenames'].sort(reverse=not self.sort_rev)\n        self.dict_dirs_files['dirnames'].sort(reverse=not self.sort_rev)\n\n    def add_files_dirs_to_dict(self):\n        if '.' in self.file_dir_name:\n            self.dict_dirs_files['filenames'].append(self.file_dir_name)\n        else:\n            self.dict_dirs_files['dirnames'].append(self.file_dir_name)\n\n    def create_new_files_dirs(self):\n        os.makedirs(self.dirname_new, exist_ok=True)\n        for key, list_obj in self.dict_dirs_files.items():\n            for obj in list_obj:\n                dir_path = os.path.join(self.dirname_new, obj)\n                if not os.path.exists(dir_path) and key == 'filenames':\n                    with open(dir_path, 'w') as file:\n                        file.write('')\n                elif key == 'dirnames':\n                    os.makedirs(dir_path, exist_ok=True)\n\n\ndirname_exist = 'alphabet' # name of the directory used to build the dictionary\nsort_direct = False # sort direction parameter\nfile_dir_name = 'dir.txt' # folder/file name to add to the dictionary\ndirname_new = 'D_homework' # name of the directory where objects from the dictionary are created\n\nworker = WorkWithFolders(dirname_exist, dirname_new, file_dir_name, sort_direct)\n\nprint(worker.dict_dirs_files)\nworker.sort_dict_lists()\nworker.add_files_dirs_to_dict()\nprint(worker.dict_dirs_files)\nworker.create_new_files_dirs()\n\n","repo_name":"artnatan/pythonintro_14.03.2022","sub_path":"homework_lesson_12.py","file_name":"homework_lesson_12.py","file_ext":"py","file_size_in_byte":4820,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18321507815","text":"# %%\nimport torch\nimport numpy as np\nimport math,csv,gzip,os,io,copy,threading,re\nimport pandas as pd\nimport time as timee\nfrom contextlib import contextmanager\nfrom concurrent.futures import ThreadPoolExecutor,as_completed\nfrom multiprocessing import Pipe,Pool\nfrom scipy import stats\nfrom pathlib import Path\nimport networkx as nx\n\n# %%\n@contextmanager\ndef timer(title):\n    t0 = timee.time()\n    yield\n    print(\"{} - done in {:.0f}s\".format(title, timee.time() - t0))\n\n\n\n\n\n\n# %%\ndef get_person_months(name_file,file):\n    with timer('read file'):\n        df_name=pd.read_csv(name_file)\n        df_name_voc=pd.read_csv(file)\n    \n    df_name_voc['start_datetime']=pd.to_datetime(df_name_voc['start_datetime'])\n    #df_voc['day']=df_voc['start_datetime'].dt.day\n    df_name_voc['hour']=df_name_voc['start_datetime'].dt.hour\n    \n    print('read file finished')\n    #df_name.set_index('phone_no_m')\n    \n    pattern='.*arpu.*'\n    columns=df_name_voc.columns.copy()\n    for column in columns:\n        if re.match(pattern=pattern,string=column):\n            del(df_name_voc[column])\n\n    \n    df_name_voc=df_name_voc.dropna() # drop every row that has a missing value\n    \n    \n\n    # set of all distinct users\n    people=set(df_name_voc['phone_no_m'])\n    #people=people[:6]\n    \n    # split by week; each element is one week's dataframe\n    month_group=[g for _,g in df_name_voc.groupby(pd.Grouper(key='start_datetime',freq='W'))] \n    \n    print('divide month finished')\n    \n    \n    '''month group is a list of dataframes'''\n    #month_group=month_group[:1]\n    # split by user \n    with timer('group by person'):\n        for idx,month in enumerate(month_group):\n            month_group[idx]=[g for _,g in month.groupby('phone_no_m')] # turn each element of month_group into a list of per-user frames for that week\n    print('divide person finished')\n    \n    \n    \n    person_months={}\n    with timer('month person to person month'):\n        for person in people:\n            person_months[person]=list() # all weekly frames for this user\n        index=0\n        for persons in month_group: # over all weeks\n            # if index>=9:\n            #     break\n            # index+=1\n            temp_people=people.copy()\n            for person in persons: # for every user present in this week\n                person=person.sort_values(by='start_datetime')\n                person=person.reset_index(drop=True)\n                who=person.loc[0,'phone_no_m']\n                person_months[who].append(person.to_dict(orient='list'))\n                temp_people.remove(who)\n            for person in temp_people: # for every user absent from this week\n                
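# pad absent users with an empty frame so every user keeps exactly one entry per week\n                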
person_months[person].append(pd.DataFrame(columns=df_name_voc.columns).to_dict(orient='list'))\n    \n    person_monthes=[]\n    labels=[]\n    df_name.set_index(keys='phone_no_m',inplace=True)\n    for p,ms in person_months.items(): # over all users\n        person_monthes.append(ms) # ms is the list of this user's weekly data dicts\n        labels.append(df_name.loc[p,'label'])\n    return person_monthes,labels\n\ndef get_person_info_and_feature(name_file,file,place_code):\n    with timer('read file'):\n        df_name=pd.read_csv(name_file) # ID information file\n        df_name_voc=pd.read_csv(file) # ID and voc information file\n    \n    df_name_voc['start_datetime']=pd.to_datetime(df_name_voc['start_datetime'])\n    \n    df_name_voc['hour']=df_name_voc['start_datetime'].dt.hour\n    \n    # drop the arpu columns\n    pattern='.*arpu.*'\n    columns=df_name_voc.columns.copy()\n    for column in columns:\n        if re.match(pattern=pattern,string=column):\n            del(df_name_voc[column])\n\n    df_name_voc=df_name_voc.dropna() \n    \n    # people set\n    people=set(df_name_voc['phone_no_m'])\n    \n    \n    \n    # group all voc by week\n    month_group=[g for _,g in df_name_voc.groupby(pd.Grouper(key='start_datetime',freq='W'))] \n    \n    print('divide month finished')\n    \n    \n    # group all voc by person for each week \n    with timer('group by person'):\n        for idx,month in enumerate(month_group):\n            month_group[idx]=[g for _,g in month.groupby('phone_no_m')] # turn each element of month_group into a list of per-user frames for that week\n    print('divide person finished')\n    \n    \n    \n    person_months={}\n    with timer('month person to person month'):\n        for person in people:\n            person_months[person]=list() # all weekly frames for this user\n        index=0\n        for persons in month_group: # over all weeks\n            # if index>=9:\n            #     break\n            # index+=1\n            temp_people=people.copy()\n            for person in persons: # every user in this week; sort the user's frame by time\n                person=person.sort_values(by='start_datetime')\n                person=person.reset_index(drop=True)\n                who=person.loc[0,'phone_no_m']\n                person_months[who].append(person.to_dict(orient='list'))\n                temp_people.remove(who)\n            for person in temp_people: # users absent this week get an empty dataframe\n                person_months[person].append(pd.DataFrame(columns=df_name_voc.columns).to_dict(orient='list'))\n    \n    person_monthes=[]\n    person_id_features=[]\n    labels=[]\n    df_name.set_index(keys='phone_no_m',inplace=True)\n    for p,ms in person_months.items(): # over all users\n        person_monthes.append(ms) # ms is the list of this user's weekly data dicts\n        city=str(df_name.loc[p,'city_name'])\n        county=str(df_name.loc[p,'county_name'])\n        place=' '.join([city,county])\n        idcard_cnt=df_name.loc[p,'idcard_cnt']\n        if place not in place_code:\n            place_code[place]=len(place_code)\n        person_id_features.append([place_code[place],idcard_cnt])\n        \n        \n        labels.append(df_name.loc[p,'label'])\n\n    return person_monthes,person_id_features,labels,place_code # person_monthes is a list of all persons; a person is a list of per-week dataframes\n\n\n\ndef get_person_info_and_feature_test(name_file,voc_file,place_code):\n    with timer('read file'):\n        df_name=pd.read_csv(name_file) # user info\n        df_voc=pd.read_csv(voc_file) # call records\n    df_name_voc=pd.merge(left=df_name,right=df_voc,on='phone_no_m')\n    df_name_voc['start_datetime']=pd.to_datetime(df_name_voc['start_datetime'])\n    #df_voc['day']=df_voc['start_datetime'].dt.day\n    df_name_voc['hour']=df_name_voc['start_datetime'].dt.hour\n    \n    print('read file finished')\n    #df_name.set_index('phone_no_m')\n    \n\n    # drop the monthly spend (arpu) columns\n    pattern='.*arpu.*'\n    columns=df_name_voc.columns.copy()\n    for column in columns:\n        if re.match(pattern=pattern,string=column):\n            del(df_name_voc[column])\n\n    \n    df_name_voc=df_name_voc.dropna() # drop every row that has a missing value\n    \n    \n\n    # set of all distinct users\n    people=set(df_name_voc['phone_no_m'])\n    #people=list(people) # convert to a list to make it ordered\n    \n    \n    
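# pd.Grouper(key='start_datetime',freq='W') buckets rows into calendar weeks (weeks end on Sunday by default)\n    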
# split by week; each element is one week's dataframe\n    month_group=[g for _,g in df_name_voc.groupby(pd.Grouper(key='start_datetime',freq='W'))] \n    \n    print('divide month finished')\n    \n    \n    '''month group is a list of dataframes'''\n    #month_group=month_group[:1]\n    # split by user \n    with timer('group by person'):\n        for idx,month in enumerate(month_group):\n            month_group[idx]=[g for _,g in month.groupby('phone_no_m')] # turn each element of month_group into a list of per-user frames for that week\n    print('divide person finished')\n    \n    \n    \n    person_months={}\n    with timer('month person to person month'):\n        for person in people:\n            person_months[person]=list() # all weekly frames for this user\n        index=0\n        for persons in month_group: # over all weeks\n            # if index>=9:\n            #     break\n            # index+=1\n            temp_people=people.copy()\n            for person in persons: # for every user present in this week\n                person=person.sort_values(by='start_datetime')\n                person=person.reset_index(drop=True)\n                who=person.loc[0,'phone_no_m']\n                person_months[who].append(person.to_dict(orient='list'))\n                temp_people.remove(who)\n            for person in temp_people: # for every user absent from this week\n                person_months[person].append(pd.DataFrame(columns=df_name_voc.columns).to_dict(orient='list'))\n    \n    person_monthes=[]\n    person_id_features=[]\n    labels=[]\n    df_name.set_index(keys='phone_no_m',inplace=True)\n    for p,ms in person_months.items(): # over all users\n        person_monthes.append(ms) # ms is the list of this user's weekly data dicts\n        \n        city=str(df_name.loc[p,'city_name'])\n        county=str(df_name.loc[p,'county_name'])\n        place=' '.join([city,county])\n        idcard_cnt=df_name.loc[p,'idcard_cnt']\n        if place not in place_code:\n            place_code[place]=len(place_code)\n        person_id_features.append([place_code[place],idcard_cnt])\n        \n        \n        labels.append(df_name.loc[p,'label'])\n\n    return person_monthes,person_id_features,labels,place_code\n\n# %%\n\nfrom features import time_gap2,time2,connector_duplicate2,area_change2,recall_rate2,energy_dispersion2,mean_voc_time2,n_unique_persons2\n# %%\nimport features as features\n\n\n# %%\ndef normalize(mx,is_trainset): # min-max normalization\n    max_list=[]\n    min_list=[]\n    num_feats=mx.shape[-1]\n    print(num_feats)\n    if is_trainset: # training set: compute and persist the scaling stats\n        for i in range(num_feats):\n            max_list.append(np.max(mx[:,:,i].flatten()))\n            min_list.append(np.min(mx[:,:,i].flatten()))\n        np.save(file=f'data_after/mymodel/max_value.npy',arr=max_list)\n        np.save(file=f'data_after/mymodel/min_value.npy',arr=min_list)\n    else:\n        max_list=np.load(file=f'data_after/mymodel/max_value.npy')\n        min_list=np.load(file=f'data_after/mymodel/min_value.npy')\n    \n    for i in range(num_feats):\n        mx[:,:,i]=(mx[:,:,i]-min_list[i])/(max_list[i]-min_list[i])\n    return mx\n\n\n    \n\n\n# %%\ndef main():\n    person_weeks,labels=get_person_months('data/0527/train/train_user.csv', 'data/0527/train/train_voc.csv')\n    np_feature=[]\n\n    '''\n    split persons labels\n    '''\n    '''cancel comment'''\n    with timer('feature '):\n        for weeks in person_weeks:\n            #weeks=weeks.sort_values()\n            weeks_feature=[]\n            index=0\n            #with timer(f'the {index} person'):\n            for idx,week in enumerate(weeks):\n                #week\n                mean=mean_voc_time2(week)\n                var=features.var2(week)\n                nup=n_unique_persons2(week)\n                ed_mean=energy_dispersion2(week)\n                rate=recall_rate2(week)\n                change=area_change2(week)\n                duplication=0.0\n                \n                if idx>0:\n                    duplication=connector_duplicate2(week,weeks[idx-1])\n                call_time=time2(week)\n                gap=time_gap2(week)\n                #index+=1\n                weeks_feature.append([mean,var,nup,ed_mean,rate,change,duplication,call_time,gap])\n            np_feature.append(weeks_feature) \n    \n    np_feature2=[]\n    labels2=[]\n    index=0\n    for weeks in np_feature:\n        for i in range(0,len(weeks)-4):\n            np_feature2.append(weeks[i:i+5])\n            labels2.append(labels[index])\n        index+=1\n    
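# np_feature2 stacks 5-week sliding windows from every user, giving an array of shape (num_windows, 5, 9)\n    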
np_array=np.array(np_feature2)\n    '''cancel comment'''\n    tensor_feature=torch.tensor(np_array)\n    tensor_feature=normalize(tensor_feature,is_trainset=True)\n    torch.save(tensor_feature, 'feature9.pt')\n    tensor_label=torch.tensor(labels2)\n    torch.save(tensor_label, 'label9.pt')\n\n\n# %%\n\ndef main_static(file_no):\n    person_weeks,labels=get_person_months('data/0527/train/train_user.csv', f'data/test{file_no}.csv')\n    feature_all=[]\n    with timer('feature '):\n        for person in person_weeks:\n            #weeks=weeks.sort_values()\n            weeks_feature=[]\n            index=0\n            #with timer(f'the {index} person'):\n            mean=[]\n            nup=[]\n            eds=[]\n            rates=[]\n            areas=[]\n            dups=list()\n            call_times=list()\n            gaps=list()\n            for idx,week in enumerate(person):\n                #week\n                mean.extend(week['call_dur'])\n                nup.extend(week['opposite_no_m'])\n                ed_mean=energy_dispersion2(week)\n                eds.append(ed_mean)\n                rate=recall_rate2(week)\n                rates.append(rate)\n                areass=features.areas(week)\n                areas.extend(areass)\n                duplication=0.0\n                \n                if idx>0:\n                    duplication=connector_duplicate2(week,person[idx-1])\n                dups.append(duplication)\n                call_times.extend(week['hour'])\n                gap=features.time_gap_static(week)\n                gaps.extend(gap)\n                #index+=1\n                #weeks_feature.append([mean,nup,ed_mean,rate,change,duplication,call_time,gap])\n            #np_feature.append(weeks_feature) \n            var=np.var(mean) # variance over the raw durations; must be taken before mean is collapsed to a scalar\n            mean=np.mean(mean)\n            nup=len(set(nup))\n            eds=np.mean(eds)\n            rates=np.mean(rates)\n            areas=len(set(areas))\n            dups=np.mean(dups)\n            call_times=stats.mode(call_times)[0][0]\n            gaps=np.mean(gaps)\n            feature_all.append([mean,var,nup,eds,rates,areas,dups,call_times,gaps])\n        \n    np_feats=np.array(feature_all)\n    np_labels=np.array(labels)\n    np.save(file=f'data_after/baselinemodel/test_x_{file_no}.npy',arr=np_feats)\n    np.save(file=f'data_after/baselinemodel/test_y_{file_no}.npy',arr=np_labels)\n    '''cancel comment'''\n\n    pass\n\n\n# %%\ndef main_allweeks():\n    data_path='~/pythonprojs/sichuan/'\n    place_code=dict()\n    filepath=Path(data_path+'data_after/mymodel/place_code.npy')\n    if filepath.is_file():\n        place_code=np.load('data_after/mymodel/place_code.npy',allow_pickle=True).item()\n    # fetch each user's weekly call-record dataframes\n    # person_weeks,person_feature,labels,place_code=get_person_info_and_feature(data_path+'data/0527/train/train_user.csv', data_path+f'data/train.csv',place_code=place_code)\n    # person_weeks_t,person_feature_t,labels_t,place_code=get_person_info_and_feature(data_path+'data/0527/train/train_user.csv',data_path+f'data/test.csv',place_code=place_code)\n    \n    \n    train_slice_feature=dict()\n    test_slice_feature=dict()\n    \n    train_id_feature=dict()\n    test_id_feature=dict()\n    \n    np_feature=[]\n\n    # build one call network per week\n    all_user=pd.read_csv('data/0527/train/train_user.csv')\n    # train_user=all_user.loc[all_user['phone_no_m'] in trai]\n    \n    train_voc=pd.read_csv(data_path+f'data/train.csv')\n    test_voc=pd.read_csv(data_path+f'data/test.csv')\n    train_voc['start_datetime']=pd.to_datetime(train_voc['start_datetime'])\n    test_voc['start_datetime']=pd.to_datetime(test_voc['start_datetime'])\n    train_voc['hour']=train_voc['start_datetime'].dt.hour\n    test_voc['hour']=test_voc['start_datetime'].dt.hour\n    train_voc=train_voc.merge(all_user[['phone_no_m','label']],how='left',on='phone_no_m')\n    test_voc=test_voc.merge(all_user[['phone_no_m','label']],how='left',on='phone_no_m')\n    train_user=train_voc['phone_no_m'].tolist()\n    test_user=test_voc['phone_no_m'].tolist()\n    train_vocs=[g for _,g in train_voc.groupby(pd.Grouper(key='start_datetime',freq='W'))]\n    test_vocs=[g for _,g in test_voc.groupby(pd.Grouper(key='start_datetime',freq='W'))]\n    train_nets=[nx.DiGraph() for i in 
    test_nets=[nx.DiGraph() for i in range(len(test_vocs))] # one directed call graph per week (test set)\n    # train_nets2=[nx.Graph() for i in range(len(train_vocs))]\n    # test_nets2=[nx.Graph() for i in range(len(test_vocs))]\n    train_neighbor={} # neighbors observed per caller (training set)\n    test_neighbor={}\n    all_user.set_index(keys='phone_no_m',drop=False,inplace=True)\n    for idx,net in enumerate(train_nets): # the graph of each time slice\n        net.add_nodes_from(train_user)\n        # net2=train_nets2[idx]\n        voc=train_vocs[idx]\n        for row in voc.itertuples(): # each call record\n            calltype_id=getattr(row,'calltype_id')\n            source=getattr(row,'phone_no_m')\n            target=getattr(row,'opposite_no_m')\n            net.add_edge(source,target) if calltype_id==1 else net.add_edge(target,source)\n            if source not in train_neighbor:train_neighbor[source]=[]\n            train_neighbor[source].append(target)\n    \n    for idx,net in enumerate(test_nets):\n        net.add_nodes_from(test_user)\n        voc=test_vocs[idx]\n        for row in voc.itertuples():\n            calltype_id=getattr(row,'calltype_id')\n            source=getattr(row,'phone_no_m')\n            target=getattr(row,'opposite_no_m')\n            net.add_edge(source,target) if calltype_id==1 else net.add_edge(target,source)\n            if source not in test_neighbor:test_neighbor[source]=[]\n            test_neighbor[source].append(target)\n    \n    \n    # mean call duration\n    # variance of call duration\n    # energy dispersion\n    \n    train_voc['mean_dur']=train_voc.groupby(['phone_no_m',pd.Grouper(key='start_datetime',freq='W')])['call_dur'].transform('mean') # weekly mean call duration\n    train_voc['var_dur']=train_voc.groupby(['phone_no_m',pd.Grouper(key='start_datetime',freq='W')])['call_dur'].transform('var') # weekly variance of call duration\n    train_voc['sum_call_times']=train_voc.groupby(['phone_no_m'])['phone_no_m'].transform('count') #\n    train_voc['every_one_calltimes']=train_voc.groupby(['phone_no_m','opposite_no_m'])['phone_no_m'].transform('count') \n    train_voc['energy_dispersion']=train_voc['every_one_calltimes']/train_voc['sum_call_times'] # energy dispersion: the share of a person's calls going to each contact\n    del train_voc['sum_call_times']\n    del train_voc['every_one_calltimes']\n    \n    test_voc['mean_dur']=test_voc.groupby(['phone_no_m',pd.Grouper(key='start_datetime',freq='W')])['call_dur'].transform('mean') # weekly mean call duration\n    test_voc['var_dur']=test_voc.groupby(['phone_no_m',pd.Grouper(key='start_datetime',freq='W')])['call_dur'].transform('var') # weekly variance of call duration\n    test_voc['sum_call_times']=test_voc.groupby(['phone_no_m'])['phone_no_m'].transform('count') #\n    test_voc['every_one_calltimes']=test_voc.groupby(['phone_no_m','opposite_no_m'])['phone_no_m'].transform('count') \n    test_voc['energy_dispersion']=test_voc['every_one_calltimes']/test_voc['sum_call_times'] # energy dispersion\n    del test_voc['sum_call_times']\n    del test_voc['every_one_calltimes']\n\n    # still to compute: out-degree, in-degree, mean neighbor degree, clustering coefficient, callback rate, contact-repeat rate, call-hour distribution\n    # id attributes:\n    \n    train_user=set(train_user)\n    test_user=set(test_user)\n    tmp_train_user=train_user.copy()\n    tmp_test_user=test_user.copy()\n    with timer('train feature '):\n        for idx,slice in enumerate(train_vocs): # each slice \n            ps=[g for _,g in slice.groupby('phone_no_m')]\n            net=train_nets[idx]\n            net:nx.DiGraph\n            for p in ps: # each person in this slice\n                id=p['phone_no_m'].iloc[0]\n                if idx==0: repeat_rate=0\n                else:\n                    pre_week=train_vocs[idx-1]\n                    pre_week=pre_week.loc[pre_week['phone_no_m']==id]\n                    repeat_rate=connector_duplicate2(pre_week,p)\n                indegree=net.in_degree[id]\n                outdegree=net.out_degree[id]\n                neighbor_degree=list()\n                for neigh in train_neighbor[id]:\n                    neighbor_degree.append(net.degree[neigh])\n                neighbor_degree=np.mean(neighbor_degree)\n                coefficient=nx.clustering(net,id)\n                recall_rate=features.recall_rate2(p)\n                time_dis=[0 for i in range(24)]\n                for time,count in p['hour'].value_counts().items(): # .items() yields (hour, count) pairs\n                    time_dis[time]=count\n
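                # slice vector = 8 scalars (in/out degree, mean neighbor degree, clustering, recall rate, repeat rate, mean/var duration) + a 24-bin call-hour histogram = 32 dims\n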
                if idx==0:train_slice_feature[id]=list()\n                mean_dur=p['mean_dur'].iloc[0]\n                var_dur=p['var_dur'].iloc[0]\n                \n                train_slice_feature[id].append([indegree,outdegree,neighbor_degree,coefficient,recall_rate,repeat_rate,mean_dur,var_dur]+time_dis)\n                tmp_train_user.discard(id) # mark this id as seen in the current slice (set.pop() takes no argument)\n            for id in list(tmp_train_user): # persons not in this slice get an all-zero slice vector\n                if idx==0:train_slice_feature[id]=list()\n                train_slice_feature[id].append([0 for i in range(32)])\n            tmp_train_user=train_user.copy() # reset the tracking set before the next slice\n    static_voc=train_voc[['phone_no_m','energy_dispersion']].drop_duplicates(subset='phone_no_m').set_index(keys='phone_no_m')\n    for id in list(train_user):\n        idcard_cnt=all_user.loc[id,'idcard_cnt']\n        ed=static_voc.loc[id,'energy_dispersion']\n        train_id_feature[id]=[idcard_cnt,ed]\n\n    np_id_feature=[]\n    labels=[]\n    for id,v in train_slice_feature.items():\n        np_feature.append(v)\n        np_id_feature.append(train_id_feature[id])\n        labels.append(all_user.loc[id,'label'])\n\n    np_feature=np.array(np_feature)\n    np_id_feature=np.array(np_id_feature)\n    labels=np.array(labels)\n    #train_n=int(len(np_feature)/4)*3\n    np_feature[:,[1,2,3,5,6]]=normalize(np_feature[:,[1,2,3,5,6]],is_trainset=True)\n    np.save(arr=np_feature,file=data_path+f'data_after/mymodel3/train_x.npy')\n    np.save(arr=np_id_feature,file=data_path+'data_after/mymodel3/train_x_id.npy')\n    np.save(arr=labels,file=data_path+f'data_after/mymodel3/train_y.npy')\n\n    np_feature=[] \n    \n    pass\n\n\n\ndef main_allweeks_testastest():\n    place_code=dict()\n    filepath=Path('data_after/mymodel/place_code.npy')\n    if filepath.is_file():\n        place_code=np.load('data_after/mymodel/place_code.npy',allow_pickle=True).item()\n    person_weeks,person_feature,labels,place_code=get_person_info_and_feature_test('data/0527/test/test_user1.csv', f'data/0527/test/test_voc.csv',place_code=place_code)\n\n    np_feature=[]\n\n    with timer('train feature '):\n        for weeks in person_weeks:\n            #weeks=weeks.sort_values()\n            weeks_feature=[]\n            index=0\n            #with timer(f'the {index} person'):\n            for idx,week in enumerate(weeks):\n                #week\n                mean=mean_voc_time2(week)\n                var=features.var2(week)\n                nup=n_unique_persons2(week)\n                ed_mean=energy_dispersion2(week)\n                rate=recall_rate2(week)\n                change=area_change2(week)\n                duplication=0.0\n                \n                if idx>0:\n                    duplication=connector_duplicate2(week,weeks[idx-1])\n                call_time=time2(week)\n                gap=time_gap2(week)\n                #index+=1\n                weeks_feature.append([mean,var,nup,ed_mean,rate,change,duplication,call_time,gap])\n            np_feature.append(weeks_feature) \n    np_feature=np.array(np_feature)\n    np_id_feature=np.array(person_feature)\n    labels=np.array(labels)\n\n\n    np.save(arr=normalize(np_feature,is_trainset=True),file=f'data_after/mymodel/true_test_x.npy')\n    np.save(arr=np_id_feature,file='data_after/mymodel/true_test_x_id.npy')\n    np.save(arr=labels,file=f'data_after/mymodel/true_test_y.npy')\n    pass\n# %%\nif __name__==\"__main__\":\n    main_allweeks()\n    \n\n\n\n# 
%%\n","repo_name":"researchonbigdata/DPGFD","sub_path":"data_process/dataprocess.py","file_name":"dataprocess.py","file_ext":"py","file_size_in_byte":24020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23659355514","text":"import numpy as np\nimport random\nfrom itertools import product\nimport contextlib\nfrom EnvironmentModel import *\nfrom ModelBasedPolicy import *\nfrom TabularModelFreePolicy import *\nfrom TabularModelFreePolicy import *\nfrom NonTabularModelFreeLearning import *\n\n\n# Configures numpy print options\n@contextlib.contextmanager\ndef _printoptions(*args, **kwargs):\n original = np.get_printoptions()\n np.set_printoptions(*args, **kwargs)\n try:\n yield\n finally: \n np.set_printoptions(**original)\n\n\nclass FrozenLake(Environment):\n def __init__(self, lake, slip, max_steps, seed=None):\n Environment.__init__(self, lake.size + 1, 4, max_steps, None, seed)\n \n self.lake = np.array(lake)\n self.lake_flat = lake.reshape(-1)\n \n self.slip = slip # probability of slipping\n \n n_states = self.lake.size + 1\n n_actions = 4\n \n self.pi = np.zeros(n_states, dtype=float)\n self.pi[np.where(self.lake_flat == '&')[0]] = 1.0 # intialise starting square by pi to 0 at this square (zero at all others)\n \n self.absorbing_state = n_states - 1\n \n # Up, down, left, right\n actions = [(-1, 0), (1, 0), (0, -1), (0, 1)]\n \n # Indices to states (coordinates), states (coordinates) to indices \n itos = list(product(range(self.lake.shape[0]), range(self.lake.shape[1])))\n stoi = {s: i for (i, s) in enumerate(itos)}\n \n # Precomputed transition probabilities\n self.p_mat = np.zeros((n_states, n_states, n_actions))\n\n hole = []\n goal = 0\n for i in range(len(self.lake_flat)):\n if self.lake_flat[i] == '#': # find where there are holes in the lake\n hole.append(i)\n if self.lake_flat[i] == '$': # find where the goal is in the lake\n goal = i\n \n for state_index, state in enumerate(itos):\n for action_index, action in enumerate(actions):\n if stoi.get(state) in hole or stoi.get(state) == goal: # if next state s absorbing state (i.e if state is hole or goal)\n next_state_index = self.absorbing_state\n else:\n next_state = (state[0] + action[0], state[1] + action[1])\n # If next_state is not valid, default to current state index\n next_state_index = stoi.get(next_state, state_index)\n \n # include slipping probability in transition matrix when next state = current state\n if stoi.get(next_state) == stoi.get(state):\n self.p_mat[next_state_index, state_index, action_index] = (self.slip/n_actions)\n\n # if next state is absorbing state, probability is 1\n if stoi.get(state) in hole or stoi.get(state) == goal:\n self.p_mat[next_state_index, state_index, action_index] = 1\n\n # include slipping probability in transition matrix\n else:\n self.p_mat[next_state_index, state_index, action_index] += (1-self.slip) # save probabilities of tansitioning between states\n for i in range(n_actions):\n self.p_mat[next_state_index, state_index, i] += (self.slip/n_actions) # include probability of sliping and taking a random action\n \n # every action at absorbing state always goes to absorbing state \n self.p_mat[self.absorbing_state][self.absorbing_state] = (1)\n\n # Transition probabilities for non-slipping environment (for comparison)\n self.p_mat_nonslip = np.zeros((n_states, n_states, len(actions)))\n\n for state_index, state in enumerate(itos):\n for action_index, action in enumerate(actions):\n if stoi.get(state) in hole or 
stoi.get(state) == goal: # if next state s absorbing state (i.ei if state is hole or goal)\n next_state_index = self.absorbing_state\n else:\n next_state = (state[0] + action[0], state[1] + action[1])\n # If next_state is not valid, default to current state index\n next_state_index = stoi.get(next_state, state_index)\n self.p_mat_nonslip[next_state_index, state_index, action_index] = 1.0 # save probabilities of tansitioning between states\n self.p_mat_nonslip[self.absorbing_state][self.absorbing_state] = 1 # every action at absorbing state always goes to absorbing state\n\n\n def draw(self, state, action):\n p = [self.p(ns, state, action, self.p_mat) for ns in range(self.n_states)]\n # find next state according to probabilities\n next_state = self.random_state.choice(self.n_states, p=p)\n # find reward of transitioning to next state\n reward = self.r(next_state, state, action)\n \n return next_state, reward\n\n def nonslip_draw(self, state, action): \n # find what the non-slipping next state would be\n p = [self.p(ns, state, action, self.p_mat_nonslip) for ns in range(self.n_states)]\n nonslip_next_state = self.random_state.choice(self.n_states, p=p)\n\n return nonslip_next_state\n\n def step(self, action):\n # end game if run out of moves\n state, reward, done = Environment.step(self, action)\n # end game if reached absorbing state\n done = (state == self.absorbing_state) or done\n \n return state, reward, done\n\n def p(self, next_state, state, action, mat = None):\n if mat is None:\n mat = self.p_mat\n # find probability from pre-caluated transition probabilities matrix\n probability = mat[next_state, state, action]\n return probability\n \n def r(self, next_state, state, action):\n # only recieve a reward when transitioning from goal state to absorbing state\n if state == self.absorbing_state - 1 and next_state == self.absorbing_state:\n reward = 1\n else:\n reward = 0\n \n return reward\n \n def render(self, policy=None, value=None):\n\n if policy is None:\n lake = np.array(self.lake_flat) \n if self.state < self.absorbing_state:\n lake[self.state] = '@' \n print(lake.reshape(self.lake.shape))\n\n else:\n actions = ['↑', '↓', '←', '→']\n print('Lake:')\n print(self.lake)\n \n print('Policy:')\n policy = np.array([actions[a] for a in policy[:-1]])\n print(policy.reshape(self.lake.shape))\n \n print('Value:')\n with _printoptions(precision=3, suppress=True):\n print(value[:-1].reshape(self.lake.shape))\n\n print(\"\")\n\n def selectAction(self):\n keys = [\"w\",\"s\",\"a\",\"d\"] # up, down, left, right\n a = False\n \n while not a:\n key = input(\"Input action:\")\n a= self.validMove(key, keys) \n action = keys.index(key)\n \n return action\n \n def validMove(self, key, keys): \n # if input action is invalid, return false\n if key not in keys:\n print(\"Invalid action! 
Try again\")\n return False\n else:\n return True\n \n def randomAction(self): \n return random.choice([0,1,2,3])\n \n def play(self,):\n state = self.reset()\n self.render() \n done = False\n move_no = 0\n actions = ['↑', '↓', '←', '→']\n \n while not done:\n move = self.max_steps - move_no\n print(\"Number of moves remaining:\",move)\n # select action to make move\n action = self.selectAction()\n print(\"Move:\", actions[action],\"\\n\")\n nonslip_next_state = self.nonslip_draw(state, action)\n state, r, done = self.step(action)\n \n # find out if you've slipped \n if state != nonslip_next_state:\n print(\"Whoops, you slipped!\")\n \n self.render() \n \n if move_no == self.max_steps-1:\n print(\"Run out of Moves\")\n\n if done == True:\n print(\"Score:\",r)\n return r\n \n move_no +=1\n \n # function to play game multple times\n def multiplePlay(self, iterations):\n score = 0\n for i in range(iterations):\n print(\"\\n\\n___________________________________\")\n print(\"Game\", i+1)\n print(\"Current Score:\",score)\n r = self.play()\n score += r\n print(\"Final Score:\", score)\n \n\n\n\ndef main():\n \n seed = 0\n \n # Small lake\n lake = np.array([['&',' ',' ',' ',],\n [' ','#',' ','#'],\n [' ',' ',' ','#'],\n ['#',' ',' ','$']])\n \n env = FrozenLake(lake,slip=0.1,max_steps=16,seed = seed)\n\n gamma = 0.9\n theta = 0.001\n max_iterations = 100\n \n\n # Policy Iteration\n policy,value,it = policy_iteration(env, gamma, theta, max_iterations, policy=None)\n print(\"1) Policy Iteration: Number of Iterations:\",it)\n print(\" \")\n env.render(policy,value)\n print(\" \")\n \n #Value Iteration\n policy,value,it = value_iteration(env, gamma, theta, max_iterations, value=None)\n print(\"2) Value Iteration: Number of Iterations:\",it)\n print(\" \")\n env.render(policy,value)\n print(\"___________________________________\")\n \n\nif __name__ == \"__main__\":\n main()\n","repo_name":"sclark003/Reinforcement-Learning_Frozen-Lake","sub_path":"SmallFrozenLake.py","file_name":"SmallFrozenLake.py","file_ext":"py","file_size_in_byte":9645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23589313381","text":"number = int(input())\nsynonyms = dict()\n\nfor num in range(number):\n\n word = input()\n synonym = input()\n if word not in synonyms:\n synonyms[word] = [synonym]\n\n else:\n synonyms[word].append(synonym)\nfor s in synonyms:\n value = \", \".join(synonyms[s])\n print(f\"{s} - {value}\")\n","repo_name":"niki9011/python-fundamentals","sub_path":"dictionaries_lab/word_synonyms.py","file_name":"word_synonyms.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21189629026","text":"import unittest\nimport sys\nsys.path.append('..')\n\nfrom classe.Player import Player\nfrom classe.Enemy import Enemy\nfrom classe.SwordItem import SwordItem\nfrom classe.PotionItem import PotionItem\n\nclass PlayerTest(unittest.TestCase):\n def setUp(self):\n self.player = Player(\"John\", 100, 50, 20, 30)\n self.strongEnemy = Enemy(\"Dragon\", 9999, 10)\n self.weakEnemy = Enemy(\"Hyène\", 30, 50)\n\n def test_character_initialization(self):\n self.assertEqual(self.player.name, \"John\")\n self.assertEqual(self.player.health, 100)\n self.assertEqual(self.player.strength, 50)\n self.assertEqual(self.player.agility, 20)\n self.assertEqual(self.player.intelligence, 30)\n self.assertEqual(self.player.items, [])\n\n def test_character_attack_failed(self):\n 
self.assertFalse(self.player.attack(self.strongEnemy))\n\n def test_character_attack_success(self):\n self.assertTrue(self.player.attack(self.weakEnemy))\n\n def test_character_defence_survived(self):\n self.assertTrue(self.player.defence(self.weakEnemy))\n # -25 hp\n self.assertEqual(self.player.health, 75)\n self.assertFalse(self.player.is_dead())\n\n def test_character_defence_died(self):\n self.player.health = 1\n self.assertFalse(self.player.defence(self.strongEnemy))\n self.assertTrue(self.player.is_dead())\n\n def test_character_escape_success(self):\n self.assertTrue(self.player.escape(self.strongEnemy))\n\n def test_character_escape_failed(self):\n self.assertFalse(self.player.escape(self.weakEnemy))\n\n def test_character_is_dead(self):\n self.assertFalse(self.player.is_dead())\n self.player.health = 0\n self.assertTrue(self.player.is_dead())\n self.player.health = -1\n self.assertTrue(self.player.is_dead())\n\n def test_character_add_item(self):\n swordItem = SwordItem(\"Excalibur\", 10)\n potionItem = PotionItem(20)\n items = [swordItem, potionItem]\n\n self.player.add_item(swordItem)\n self.player.add_item(potionItem)\n\n self.assertEqual(self.player.items[0].name, items[0].name)\n self.assertEqual(self.player.items, items)\n\n def test_character_use_item(self):\n swordItem = SwordItem(\"Excalibur\", 10)\n potionItem = PotionItem(20)\n beforeStrength = self.player.strength\n beforeHealth = self.player.health\n\n self.player.add_item(swordItem)\n self.player.add_item(swordItem)\n self.player.add_item(potionItem)\n\n self.player.use_item(swordItem)\n self.player.use_item(potionItem)\n\n self.assertEqual(self.player.strength, beforeStrength + swordItem.strength)\n self.assertEqual(self.player.health, beforeHealth + potionItem.health)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"esgi-cedric-kadima/saga-valoria","sub_path":"back_end/tests/test_player.py","file_name":"test_player.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32003722239","text":"import time\n\nimport telebot\nimport openai\nfrom environs import Env\n\n\nenv = Env()\nenv.read_env()\n\nBOT_TOKEN = env.str('BOT_TOKEN')\nAPI_KEY = env.str('API_KEY')\nADMIN_ID = env.list('ADMIN_ID')\nIP = env.str('IP')\n\nbot = telebot.TeleBot(BOT_TOKEN)\nopenai.api_key = API_KEY\n\n\n@bot.message_handler(content_types=['text'])\ndef handle_text(message):\n response = openai.Completion.create(\n engine='text-davinci-003',\n prompt=f'{message.text}',\n max_tokens=1024,\n n=1,\n stop=None,\n temperature=0.5,\n )\n bot.send_message(message.chat.id, response.choices[0].text)\n\n\nbot.polling()\n\n# if __name__ == '__main__':\n# while True:\n# try:\n# bot.polling(none_stop=True, interval=0)\n# except Exception as e:\n# time.sleep(10)","repo_name":"Amadu-A/chat_gpt_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32123825179","text":"# cách 2:\n# sử dụng hàm\n\nimport math\n\n# hàm nhập dữ liệu\ndef Ham_NhapDuLieu():\n a = float(input(\"Nhap a: \"))\n\n while a == 0:\n a = float(input(\"Nhap lai a (a != 0): \"))\n\n b = float(input(\"Nhap b: \"))\n c = float(input(\"Nhap c: \"))\n\n return a, b, c\n\n# hàm tính delta:\ndef Ham_Tinh_delta(a, b, c):\n delta = b**2 - 4*a*c\n\n return delta\n\n# hàm phương trình vô nghiệm\ndef Ham_PhuongTrinhVoNghiem():\n print(f\"Phuong 
trinh vo nghiem!\")\n\n# function for the double-root case\ndef Ham_PhuongTrinhCo_NghiemKep(a, b):\n    x = -b / (2*a)\n    print(f\"Phuong trinh co nghiem kep: \", end=\"\")\n    print(f\"x1 = x2 = {x:{'.'}2f}\")\n\n# function for the case of two distinct roots\ndef Ham_PhuongTrinhCo_2_Nghiem(a, b, delta):\n    x1 = (-b + math.sqrt(delta)) / (2*a)\n    x2 = (-b - math.sqrt(delta)) / (2*a)\n\n    print(f\"Phuong trinh co 2 nghiem: \", end=\"\")\n    print(f\"x1 = {x1:{'.'}2f}, x2 = {x2:{'.'}2f}\")\n\n# main program\n\n# call the input function\na, b, c = Ham_NhapDuLieu()\n\n# call the function that computes delta (the discriminant)\ndelta = Ham_Tinh_delta(a, b, c)\n\nif delta < 0:\n    # call the function for the no-real-roots case\n    Ham_PhuongTrinhVoNghiem();\n\nelif delta == 0:\n    # call the function for the double-root case\n    Ham_PhuongTrinhCo_NghiemKep(a, b)\n\nelif delta > 0:\n    # call the function for the two-roots case\n    Ham_PhuongTrinhCo_2_Nghiem(a, b, delta)","repo_name":"conggaro/Hoc_Python","sub_path":"code python/Tuần 6 Hàm/Ví dụ 6-1/cach_2.py","file_name":"cach_2.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} {"seq_id":"9408534524","text":"\ndef solution(arr1, arr2):\n    answer = []\n    for i in range(len(arr1)):\n        temp = []\n        for j in range(len(arr1[i])):\n            temp.append(arr1[i][j]+arr2[i][j])\n        answer.append(temp)\n    \n    return answer\n\ndef solutrion(x,y):\n    answer = [[c+d for c,d in zip(a,b)] for a,b in zip(x,y)]\n    return answer","repo_name":"sanghyeonchoi/Algorithm","sub_path":"programers03.py","file_name":"programers03.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} {"seq_id":"1425079810","text":"import sys\nimport os\nimport random\nimport math\nimport argparse\nimport numpy as np\nimport scipy.stats\n\nclass CDFentry:\n    def __init__(self):\n        self.cdf_ = 0\n        self.val_ = 0\n\nclass UniformRandomVariable:\n    def __init__(self):\n        self.min_ = 0.0\n        self.max_ = 1.0\n\n    def value(self):\n        unif0_1 = random.random()\n        return self.min_ + (self.max_ - self.min_) * unif0_1\n\nclass ExponentialRandomVariable:\n    def __init__(self, avg):\n        self.avg_ = avg\n        self.urv = UniformRandomVariable()\n    def value(self):\n        return -1.0 * self.avg_ * math.log(self.urv.value())\n\n\nclass EmpiricalRandomVariable:\n\n    def __init__(self, filename, smooth):\n        self.smooth = smooth\n        self.minCDF_ = 0\n        self.maxCDF_ = 1\n        self.table_ = []\n        for i in range(65536):\n            self.table_.append(CDFentry())\n        if(filename != \"\"):\n            self.loadCDF(filename)\n\n    def loadCDF(self, filename):\n        numEntry_ = 0\n        prev_cd = 0\n        prev_sz = 1\n        w_sum = 0\n        file = open(filename, \"r\")\n        f = file.readlines()\n        for line in f:\n            values = line.split()\n            self.table_[numEntry_].val_ = float(values[0])\n            self.table_[numEntry_].cdf_ = float(values[1])\n            self.table_[numEntry_].cdf_ = float(values[2]) # the third column wins: column 2 is immediately overwritten and column 3 is kept as the CDF\n            freq = self.table_[numEntry_].cdf_ - prev_cd\n            flow_sz = 0\n            if self.smooth:\n                flow_sz = (self.table_[numEntry_].val_ + prev_sz) / 2.0\n            else: \n                flow_sz = self.table_[numEntry_].val_\n            w_sum += freq * flow_sz\n            prev_cd = self.table_[numEntry_].cdf_\n            prev_sz = self.table_[numEntry_].val_\n            numEntry_ += 1\n\n        self.mean_flow_size = w_sum * 1460.0;\n        file.close()\n        self.numEntry_ = numEntry_\n        return numEntry_\n\n    def lookup(self, u):\n        lo = 1\n        hi = self.numEntry_ - 1 \n        mid = 0\n        if u <= self.table_[0].cdf_:\n            return 0\n        while lo < hi:\n            mid = (lo + hi) // 2 # integer division: mid is used as a list index\n            if u > self.table_[mid].cdf_:\n                lo = mid + 1\n            else:\n                hi = mid \n        return lo\n    \n
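    # inverse-transform sampling: lookup() binary-searches for the first table entry with cdf >= u, and interpolate() then maps u linearly between the neighboring (cdf, val) points\n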
    def interpolate(self, x, x1, y1, x2, y2):\r\n        value = y1 + (x - x1) * (y2 - y1) / (x2 - x1)\r\n        return value\r\n\r\n    def value(self):\r\n        if self.numEntry_ <= 0:\r\n            return 0\r\n        u = random.random()\r\n        mid = self.lookup(u)\r\n        if mid != 0 and u < self.table_[mid].cdf_:\r\n            return self.interpolate(u, self.table_[mid-1].cdf_, self.table_[mid-1].val_,\r\n                self.table_[mid].cdf_, self.table_[mid].val_)\r\n        return self.table_[mid].val_\r\n\r\n","repo_name":"Terabit-Ethernet/workload_generator","sub_path":"random_variable.py","file_name":"random_variable.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} {"seq_id":"338632027","text":"from src.preprocessing.preprocessing import Pre_processing\nfrom src.utils.constants import file_path\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import accuracy_score\n\n\nclass SVM_Classification:\n    def __init__(self):\n        self.dataset = file_path\n\n    def apply_model(self):\n        \"\"\"\n        Function to create instance of Pre_processing class,\n        passing file path for preprocessing,\n        then sending that data to svm_classification function.\n        \"\"\"\n        data = Pre_processing(self.dataset)\n        processed_data = data.convert_dataframe()\n        self.svm_classification(processed_data)\n\n    @staticmethod\n    def svm_classification(processed_data):\n        \"\"\"\n        Function to make prediction using SVM(kernel=\"rbf\"),\n        using X_train and X_test, then make prediction\n        according to X_test data and then checking accuracy\n        using y_test data.\n        \"\"\"\n        try:\n            X_train = processed_data[0]\n            X_test = processed_data[1]\n            y_train = processed_data[2]\n            y_test = processed_data[3]\n\n            model = SVC(kernel='rbf')\n            model.fit(X_train, y_train)\n\n            prediction = model.predict(X_test)\n\n            score = accuracy_score(y_test, prediction)\n            print(\"Accuracy using SVM is:\", score)\n\n        except:\n            print(\"Some Error Occurred in Process\")\n","repo_name":"amanknoldus/assignment-4","sub_path":"Assignment_4/src/pipeline/svm_classification_credit_card.py","file_name":"svm_classification_credit_card.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} {"seq_id":"13748739151","text":"from tkinter import *\nfrom tkinter import messagebox\n\n# Window - size, title and colours\nwindow = Tk()\nwindow.geometry(\"600x300\")\nwindow.title(\"Exception Handling\")\nwindow[\"bg\"] = \"violet\"\n\n\n# Information on the window, labels, entries and buttons. Defining functions\nclass Money:\n    def __init__(self, window):\n        self.label = Label(window, text=\"Please enter your amount.\", bg=\"violet\")\n        self.label.place(x=200, y=50)\n        self.label_entry = Entry(window)\n        self.label_entry.place(x=200, y=100)\n        self.verify = Button(window, text=\"Verify\", bg=\"pink\", command=self.verify, borderwidth=5)\n        self.verify.place(x=250, y=150)\n        self.exit = Button(window, text=\"Exit\", command=self.exit, bg=\"lightblue\", borderwidth=5)\n        self.exit.place(x=255, y=200)\n\n# Defining the verify button function\n    def verify(self):\n        try:\n            money = float(self.label_entry.get())\n            if money < 3000:\n                messagebox.showerror(\"Insufficient funds\", \"Please deposit more funds for this excursion.\")\n                self.label_entry.delete(0, END)\n            else:\n                messagebox.showinfo(\"Accepted\", \"Congratulations. 
You qualify to go to Malaysia\")\n self.label_entry.delete(0, END)\n except ValueError:\n messagebox.showerror(\"Invalid input\", \"Please put in an amount in numbers.\")\n self.label_entry.delete(0, END)\n\n# Defining the exit button function\n def exit(self):\n msg_box = messagebox.askquestion(\"Exit Application\", \"Are you sure you want to exit the application\",\n icon='warning')\n if msg_box == \"yes\":\n window.destroy()\n\n\n# Object Money(class), Window(where information is displayed)\nobj = Money(window)\n# Closing the window\nwindow.mainloop()\n","repo_name":"byronleetinker/Error_Handling_Task","sub_path":"window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25780326445","text":"import point_gnc\nimport gnc_geometric_network\nimport matplotlib.pyplot as plt\n# import numpy as np\nimport pandas as pd\n\n\n# -----------------------------------------------------------------------------------------------------\n# Function that generates random geometric model\n\ndef generate_model(n, c):\n net = gnc_geometric_network.Network(c)\n for i in range(0, n):\n p = point_gnc.Point(i)\n net.add_vertex(p)\n net.make_edges()\n # net.print_network()\n # net.draw_network(\"main_network\")\n return net\n\n\n# -----------------------------------------------------------------------------------------------------\n# Drawing distribution histogram\n\ndef draw_distribution_histogram(categories, counts, title):\n plt.bar(categories, counts, color='orange')\n plt.title('{} distribution'.format(title))\n plt.xlabel('degree')\n plt.ylabel('number of nodes')\n plt.savefig('{}_bar.png'.format(title), dpi=300)\n plt.show()\n\n\n# -----------------------------------------------------------------------------------------------------\n\ndef gnc_experiments():\n col_names = ['n', 'c', 'degree', 'amount_of_nodes']\n distributions_df = pd.DataFrame(columns=col_names)\n c_list = [0.05, 0.1, 0.15, 0.2]\n n = 100\n for c in c_list:\n n_c_df = pd.DataFrame(columns=col_names)\n for run_counter in range(0, 100):\n net = generate_model(n, c)\n mat = net.adjacency_matrix()\n sum_arr = []\n for i in range(len(mat)):\n sum_i = 0\n for j in range(len(mat)):\n sum_i += mat[i][j]\n sum_arr.append(sum_i)\n degree_count = [0] * len(sum_arr)\n for i in range(len(sum_arr)):\n degree_count[sum_arr[i]] += 1\n for i in range(len(degree_count)):\n n_c_df_i = pd.DataFrame([[n, c, i, degree_count[i]]], columns=col_names)\n n_c_df = n_c_df.append(n_c_df_i, ignore_index=True, sort=False)\n '''print(\"n_c_df :\")\n print(n_c_df)'''\n for degree in range(0, n):\n true_false_degree = n_c_df['degree'] == degree\n means_node = n_c_df.loc[true_false_degree, 'amount_of_nodes'].mean()\n df_i = pd.DataFrame([[n, c, degree, means_node]], columns=col_names)\n distributions_df = distributions_df.append(df_i, ignore_index=True, sort=False)\n distributions_df.to_csv('distributions_gnc.csv')\n\n\n# -----------------------------------------------------------------------------------------------------\n\ndef main():\n gnc_experiments()\n '''n = 50\n c = 0.4\n net = generate_model(n, c)\n mat = net.adjacency_matrix()\n sum_arr = []\n for i in range(len(mat)):\n sum = 0\n for j in range(len(mat)):\n sum += mat[i][j]\n sum_arr.append(sum)\n degree_count = [0] * len(sum_arr)\n for i in range(len(sum_arr)):\n degree_count[sum_arr[i]] += 1\n print(\"degree_count :\")\n print(degree_count)\n # 
draw_distribution_histogram(list(np.arange(n)), degree_count, 'degree')\n # rail_v_geo, rail_e_geo = posa_improvement_for_geometric_model.posa(net)\n # max_clique_group = maximal_clique_algorithm.deterministic_maximal_clique_algorithm(net)\n # maximal_clique_algorithm.check_if_clique(net, max_clique_group)\n'''\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"TzuriR/Complex-Networks","sub_path":"clique/gnc_model_generation.py","file_name":"gnc_model_generation.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"72597871526","text":"import sqlite3\ncon=sqlite3.connect(\"Dersler.db\")\ncursor=con.cursor()\nisim=input(\"İsim Giriniz\\t:\")\nsoyisim=input(\"Soyisim Giriniz\\t:\")\nOkulNo=input(\"Okul Numaranızı Giriniz\\t:\")\nNotunuz=input(\"Notunuzu Giriniz\\t:\")\n\ndef tabloOlustur():\n cursor.execute(\"CREATE TABLE IF NOT EXISTS Ogrenciler(Ad TEXT,Soyad TEXT,Numara INT, Notu INT)\")\ndef kayitEkle():\n cursor.execute(\"INSERT INTO Ogrenciler VALUES('\"+isim+\"','\"+soyisim+\"',\"+OkulNo+\",\"+Notunuz+\")\")\n con.commit()\n\n\ntabloOlustur()\nkayitEkle()\ndef kayitlariGoruntule():\n cursor.execute(\"SELECT * FROM Ogrenciler where Notu=100\")\n kayitlar=cursor.fetchall()\n for i in kayitlar:\n print(i)\n\nkayitlariGoruntule()\ncon.close()","repo_name":"hasangurbuz01/Yuz_Tanima_Calismalari","sub_path":"UygulamalarHG/SQLiteTablodanVeriCekme.py","file_name":"SQLiteTablodanVeriCekme.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1392713815","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Actividad',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('titulo', models.TextField()),\n ('tipo', models.TextField()),\n ('precio', models.TextField()),\n ('fecha', models.DateField()),\n ('hora', models.TimeField()),\n ('larga_dur', models.TextField()),\n ('url', models.TextField()),\n ('fecha_usuario', models.DateField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Usuario',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('nombre', models.CharField(max_length=32)),\n ('titulo_usuario', models.TextField()),\n ('actividades', models.ManyToManyField(to='final.Actividad')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","repo_name":"riojafernando/2015-saro-pfinal","sub_path":"final/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71132652645","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n# Author: K_liu\r\n'STM32 操作函数模块'\r\n\r\nimport serial,math,re,time,pickle\r\nimport sys\r\nsys.path.append(\"../nntrain\")\r\nimport number_recog as nr\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nimport ctypes\r\n\r\ndef send_frame(s,d,base_addr):\r\n sof = [0x55,0xab,0xaa] #start of frame\r\n fl = [len(d)-1] #length of frame\r\n frame = sof + [base_addr] + fl + d #combine frame\r\n s.write(frame)\r\n\r\ndef array2uc(ar):\r\n a = []\r\n d = 
0\r\n    i = 0\r\n    for l in range(ar.shape[0]):\r\n        if(ar[l] == 1):\r\n            d = (d << 1) | 1\r\n        else:\r\n            d = (d << 1) | 0\r\n        i += 1\r\n        if(i >= 8):\r\n            a.append(d)\r\n            i = 0\r\n            d = 0\r\n    if d != 0: # length not a multiple of 8: flush the leftover bits\r\n        a.append(d)\r\n    for i in a:\r\n        print(\"%02X\"%i,end = ' ')\r\n    print(\"\\n\")\r\n    return a\r\n\r\ndef uc2array(ar):\r\n    a = np.zeros([len(ar)*8])\r\n    j = 0\r\n    for l in ar:\r\n        for i in range(0,8):\r\n            if (l & (1<<(7-i))) != 0:\r\n                a[j] = 1\r\n            else:\r\n                a[j] = 0\r\n            j += 1\r\n    return a\r\n\r\ndef softmax(X):\r\n    exps = np.exp(X)\r\n    return exps/np.sum(exps)\r\n\r\ndef serial_test(com,noise=0.0,mode=0,epoch_n=0,read_bas=None,datal=None):\r\n    if read_bas == None or datal == None:\r\n        with open('./sctest.info', 'rb') as fp:\r\n            sc_info = pickle.load(fp)\r\n        read_base = int(sc_info['dst_addr'])\r\n        datalen = int(sc_info['out_nn'])\r\n    else:\r\n        read_base = read_bas\r\n        datalen = datal\r\n\r\n    with open('./sctest.info', 'rb') as fp:\r\n        sc_info = pickle.load(fp)\r\n    f_bit, e_bit = sc_info['float_format']\r\n\r\n    ser = serial.Serial(com, 115200, timeout=1.0)\r\n    ds = nr.Number_Data('../nntrain/number_source.txt')\r\n    ds.generate_train_set(noise, 20)\r\n    # raw-data mode\r\n    if mode == 0:\r\n        for i in range(0, epoch_n):\r\n            da = array2uc(ds.train_x[:, i])\r\n            show_set = ds.train_x[:, i].reshape([8, 9])\r\n            sda = ser.read_all()\r\n            send_frame(ser, da, read_base)\r\n            sda = ser.read(4*datalen).hex()\r\n            str = re.sub(r\"(?<=\\w)(?=(?:\\w\\w)+$)\", \" \", sda)\r\n            fo = []\r\n            for j in range(0,datalen):\r\n                dc = sda[j*8:j*8+8]\r\n                fo.append(cf_dll.s2f(ctypes.c_char_p(bytes(dc,'utf-8')),f_bit,e_bit))\r\n                print('%3.2f'%fo[-1],end=' ')\r\n            mindex =fo.index(max(fo))\r\n            fo = softmax(np.array(fo)).reshape(1,datalen)\r\n            #print('\\n',fo)\r\n            print('\\n序号:%d,最大值:%.3f'%(mindex,fo[0,mindex]))\r\n\r\n            plt.subplot(1, epoch_n, i + 1), plt.imshow(show_set, 'gray')\r\n            plt.xticks([])\r\n            plt.yticks([])\r\n            plt.xlabel(format(' %d : %.2f'%(mindex,fo[0,mindex])))\r\n            plt.pause(0.01)\r\n        ser.close()\r\n        plt.show()\r\n    # receive raw data only\r\n    elif mode == 1:\r\n        for i in range(0, epoch_n):\r\n            da = array2uc(ds.train_x[:, i])\r\n            show_set = ds.train_x[:, i].reshape([8, 9])\r\n            sda = ser.read_all()\r\n            send_frame(ser, da, read_base)\r\n            sda = ser.read(4 * datalen).hex()\r\n            str = re.sub(r\"(?<=\\w)(?=(?:\\w\\w)+$)\", \" \", sda)\r\n\r\n            plt.subplot(1, epoch_n, i + 1), plt.imshow(show_set, 'gray')\r\n            plt.xticks([])\r\n            plt.yticks([])\r\n            plt.pause(0.01)\r\n        ser.close()\r\n        plt.show()\r\n    # image-display mode\r\n    else:\r\n        epoch_t = 4\r\n        sn = math.ceil(datalen / 8)\r\n        for i in range(0, epoch_n):\r\n            da = ds.train_x[:, i]\r\n            show_set = da.reshape([8, 9])\r\n            str = ser.read_all()\r\n            #da[0:23] = 0\r\n            da = array2uc(da)\r\n            send_frame(ser, da, read_base)\r\n            plt.subplot(epoch_t, epoch_n, i + 1), plt.imshow(show_set, 'gray')\r\n            # the j-th iteration\r\n            for j in range(1, epoch_t):\r\n                str = (re.sub(r\"(?<=\\w)(?=(?:\\w\\w)+$)\", \" \", ser.read(sn).hex())).split(' ')\r\n                a = []\r\n                print(str)\r\n                if len(str) >= sn:\r\n                    for d in range(0, sn):\r\n                        a.append(int(str[d], 16))\r\n                    a.reverse()\r\n                    send_frame(ser, a, read_base)\r\n                else:\r\n                    a = [0xfe for i in range(0,sn)]\r\n                    send_frame(ser,da, read_base)\r\n\r\n                show_set = uc2array(a).reshape([8, 9])\r\n                plt.subplot(epoch_t, epoch_n, i + j * epoch_n + 1), plt.imshow(show_set, 'gray')\r\n                plt.pause(0.001)\r\n        ser.close()\r\n        plt.show()\r\n\r\ndef show_s2h(dc):\r\n    hex_c = re.sub(r\"(?<=\\w)(?=(?:\\w\\w)+$)\", \" \", dc).split(' ')\r\n    hex_h = []\r\n    for i in hex_c:\r\n        hex_h.append(int(i, 16))\r\n
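    # inverse of array2uc(): uc2array() expands each parsed byte back into 8 binary pixels, giving the 8x9 digit image\r\n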
    show_set = uc2array(hex_h).reshape(8, 9)\r\n    plt.subplot(111), plt.imshow(show_set, 'gray')\r\n    plt.show()\r\n\r\nif __name__ == '__main__':\r\n    cf_dll = ctypes.windll.LoadLibrary('../bin/f2cf_dll.dll')\r\n    cf_dll.s2f.restype = ctypes.c_float\r\n    noise = 0.10\r\n    img_n = 5\r\n    mode = 2 # 0: receive float data  2: receive the binary image\r\n    serial_test('COM17',noise,mode,img_n)\r\n\r\n\r\n","repo_name":"boyoffreedom/FPGA_NN_Generator","sub_path":"NN_CORE/tools/sc_test.py","file_name":"sc_test.py","file_ext":"py","file_size_in_byte":5315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} {"seq_id":"32978403213","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nimport sys\nfrom PyQt5.QtSerialPort import QSerialPort, QSerialPortInfo\nfrom PyQt5.QtCore import QIODevice\nfrom PyQt5.QtWidgets import QMessageBox\nfrom enum import Enum\n\nBAUD_RATE = 9600\n\n\n\nclass ExpectedSignalType(Enum):\n    NO_WAITING = 1\n    WAITING_PINS_INFO = 2\n    WAITING_COMMAND_RESULT = 3\n\nclass PinState(Enum):\n    OFF = 0\n    ON = 1\n\nCOMMANDS = {(PinState.ON, 1) : \"comm1\\n\", # command dictionary: (pin state, pin number) -> serial command\n            (PinState.ON, 2) : \"comm2\\n\",\n            (PinState.ON, 3) : \"comm3\\n\",\n            (PinState.OFF, 1) : \"comm4\\n\",\n            (PinState.OFF, 2) : \"comm5\\n\",\n            (PinState.OFF, 3) : \"comm6\\n\",\n            }\n\nclass Ui_portSelection(object):\n    \n    def setupUi(self, portSelection):\n        # window drawing\n        portSelection.setObjectName(\"portSelection\")\n        portSelection.resize(311, 149)\n        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n        sizePolicy.setHorizontalStretch(0)\n        sizePolicy.setVerticalStretch(0)\n        sizePolicy.setHeightForWidth(portSelection.sizePolicy().hasHeightForWidth())\n        portSelection.setSizePolicy(sizePolicy)\n        portSelection.setMinimumSize(QtCore.QSize(311, 149))\n        portSelection.setMaximumSize(QtCore.QSize(311, 149))\n        self.horizontalLayoutWidget = QtWidgets.QWidget(portSelection)\n        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(30, 50, 261, 80))\n        self.horizontalLayoutWidget.setObjectName(\"horizontalLayoutWidget\")\n        self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)\n        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)\n        self.horizontalLayout.setObjectName(\"horizontalLayout\")\n        self.comboBox_pins = QtWidgets.QComboBox(self.horizontalLayoutWidget)\n        self.comboBox_pins.setObjectName(\"comboBox_pins\")\n        self.horizontalLayout.addWidget(self.comboBox_pins)\n        self.pushButton_ok = QtWidgets.QPushButton(self.horizontalLayoutWidget)\n        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n        sizePolicy.setHorizontalStretch(0)\n        sizePolicy.setVerticalStretch(0)\n        sizePolicy.setHeightForWidth(self.pushButton_ok.sizePolicy().hasHeightForWidth())\n        self.pushButton_ok.setSizePolicy(sizePolicy)\n        self.pushButton_ok.setObjectName(\"pushButton_ok\")\n        self.horizontalLayout.addWidget(self.pushButton_ok)\n        self.label = QtWidgets.QLabel(portSelection)\n        self.label.setGeometry(QtCore.QRect(20, 20, 281, 16))\n        self.label.setObjectName(\"label\")\n\n        self.__retranslateUi(portSelection)\n        QtCore.QMetaObject.connectSlotsByName(portSelection)\n\n        self.comboBox_pins.addItems(self.__search_serial_ports()) # filling comboBox of ports\n        self.pushButton_ok.clicked.connect(lambda: self.__open_main_window(portSelection))\n        \n\n    def __retranslateUi(self, portSelection):\n        _translate = QtCore.QCoreApplication.translate\n        portSelection.setWindowTitle(_translate(\"portSelection\", \"Выбор COM-порта\"))\n
        self.pushButton_ok.setText(_translate(\"portSelection\", \"OK\"))\n        self.label.setText(_translate(\"portSelection\", \"Выберете COM-порт для связи с микроконтроллером\"))\n\n\n    def __search_serial_ports(self): # method of searching for list of available serial ports\n        portlist = []\n        for port in QSerialPortInfo().availablePorts():\n            portlist.append(port.portName())\n        return portlist\n    \n\n    def __open_main_window(self, portSelection):\n        if (self.comboBox_pins.currentText() == \"\"): # check: is any COM port available\n            creators_info = QMessageBox()\n            creators_info.setWindowTitle(\"Ошибка!\")\n            creators_info.setText(\"Отсутствуют COM-порты.\")\n            creators_info.exec_()\n            self.__search_serial_ports() # re-run the COM-port discovery function\n            return\n        self.MainWindow = QtWidgets.QMainWindow()\n        self.ui = Ui_MainWindow()\n        self.ui.setupUi(self.MainWindow, self.comboBox_pins.currentText()) # opening the main window and specifying the selected port \n        self.MainWindow.show()\n        portSelection.close()\n\n\nclass Ui_MainWindow(object):\n    \n    def setupUi(self, MainWindow, currentSerialPort):\n        # window drawing\n        MainWindow.setObjectName(\"MainWindow\")\n        MainWindow.setEnabled(True)\n        MainWindow.resize(380, 150)\n        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n        sizePolicy.setHorizontalStretch(0)\n        sizePolicy.setVerticalStretch(0)\n        sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())\n        MainWindow.setSizePolicy(sizePolicy)\n        MainWindow.setMinimumSize(QtCore.QSize(380, 150))\n        MainWindow.setMaximumSize(QtCore.QSize(380, 150))\n        self.centralwidget = QtWidgets.QWidget(MainWindow)\n        self.centralwidget.setObjectName(\"centralwidget\")\n        self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)\n        self.verticalLayoutWidget.setGeometry(QtCore.QRect(20, 20, 91, 111))\n        self.verticalLayoutWidget.setObjectName(\"verticalLayoutWidget\")\n        self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)\n        self.verticalLayout.setContentsMargins(0, 0, 0, 0)\n        self.verticalLayout.setObjectName(\"verticalLayout\")\n        self.checkBox_pin_1 = QtWidgets.QCheckBox(self.verticalLayoutWidget)\n        self.checkBox_pin_1.setObjectName(\"checkBox_pin_1\")\n        self.verticalLayout.addWidget(self.checkBox_pin_1)\n        self.checkBox_pin_2 = QtWidgets.QCheckBox(self.verticalLayoutWidget)\n        self.checkBox_pin_2.setObjectName(\"checkBox_pin_2\")\n        self.verticalLayout.addWidget(self.checkBox_pin_2)\n        self.checkBox_pin_3 = QtWidgets.QCheckBox(self.verticalLayoutWidget)\n        self.checkBox_pin_3.setObjectName(\"checkBox_pin_3\")\n        self.verticalLayout.addWidget(self.checkBox_pin_3)\n        self.verticalLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget)\n        self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(130, 40, 201, 80))\n        self.verticalLayoutWidget_2.setObjectName(\"verticalLayoutWidget_2\")\n        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2)\n        self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)\n        self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\n        self.label_currentSerialPort = QtWidgets.QLabel(self.verticalLayoutWidget_2)\n        self.label_currentSerialPort.setObjectName(\"label_currentSerialPort\")\n        self.verticalLayout_2.addWidget(self.label_currentSerialPort)\n        self.pushButton_changeSerialPort = QtWidgets.QPushButton(self.verticalLayoutWidget_2)\n        self.pushButton_changeSerialPort.setObjectName(\"pushButton_changeSerialPort\")\n        self.verticalLayout_2.addWidget(self.pushButton_changeSerialPort)\n        self.pushButton_programInfo = 
QtWidgets.QPushButton(self.verticalLayoutWidget_2)\n        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)\n        sizePolicy.setHorizontalStretch(0)\n        sizePolicy.setVerticalStretch(0)\n        sizePolicy.setHeightForWidth(self.pushButton_programInfo.sizePolicy().hasHeightForWidth())\n        self.pushButton_programInfo.setSizePolicy(sizePolicy)\n        self.pushButton_programInfo.setObjectName(\"pushButton_programInfo\")\n        self.verticalLayout_2.addWidget(self.pushButton_programInfo)\n        MainWindow.setCentralWidget(self.centralwidget)\n\n        self.__retranslateUi(MainWindow, currentSerialPort)\n        QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n        # initialize the COM port: set the baud rate, select the port name, open it for read/write\n        self.serial = QSerialPort()\n        self.serial.setBaudRate(BAUD_RATE)\n        self.serial.setPortName(currentSerialPort)\n        self.serial.open(QIODevice.ReadWrite)\n\n        self.serial.readyRead.connect(lambda: self.__get_info_from_mc()) # is this connection actually needed??\n        \n        self.type_of_expected_mc_signal = ExpectedSignalType.NO_WAITING # type of signal expected from the MCU - none\n        self.__check_pins_states() # request the pin states\n\n        # user interface signals\n        self.checkBox_pin_1.clicked.connect(lambda: self.__change_pin_state(1, PinState((self.checkBox_pin_1.isChecked() + 1) // 2)))\n        self.checkBox_pin_2.clicked.connect(lambda: self.__change_pin_state(2, PinState((self.checkBox_pin_2.isChecked() + 1) // 2)))\n        self.checkBox_pin_3.clicked.connect(lambda: self.__change_pin_state(3, PinState((self.checkBox_pin_3.isChecked() + 1) // 2)))\n        self.pushButton_programInfo.clicked.connect(self.__show_program_info)\n        self.pushButton_changeSerialPort.clicked.connect(lambda: self.__open_portSelection_window(MainWindow))\n\n\n    def __retranslateUi(self, MainWindow, currentSerialPort):\n        _translate = QtCore.QCoreApplication.translate\n        MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Система управления питанием СЗИ\"))\n        self.checkBox_pin_1.setText(_translate(\"MainWindow\", \"Пин №1\"))\n        self.checkBox_pin_2.setText(_translate(\"MainWindow\", \"Пин №2\"))\n        self.checkBox_pin_3.setText(_translate(\"MainWindow\", \"Пин №3\"))\n        self.label_currentSerialPort.setText(_translate(\"MainWindow\", \"Текущий COM-порт: \")+currentSerialPort)\n        self.pushButton_changeSerialPort.setText(_translate(\"MainWindow\", \"Изменить COM-порт\"))\n        self.pushButton_programInfo.setText(_translate(\"MainWindow\", \"О программе\"))\n\n\n    def __get_info_from_mc(self):\n        inputData = str(self.serial.readLine(), 'utf-8').strip()\n        #print(inputData) # for debugging \n        if (self.type_of_expected_mc_signal == ExpectedSignalType.NO_WAITING):\n            print(\"Несканционированная передача данных с микроконтроллера.\") \n            # MessageBox?\n            #\n            #\n        elif (self.type_of_expected_mc_signal == ExpectedSignalType.WAITING_PINS_INFO):\n            print(\"Получение инфы от МК...\") \n            # a parser for the pin-state reply should go here\n            #\n            #\n            return [0, 2, 0]\n        elif (self.type_of_expected_mc_signal == ExpectedSignalType.WAITING_COMMAND_RESULT):\n            self.type_of_expected_mc_signal = ExpectedSignalType.NO_WAITING\n            if (inputData == \"Command is correct\"): return True\n            else: return False\n\n\n    def __check_pins_states(self):\n        self.type_of_expected_mc_signal = ExpectedSignalType.WAITING_PINS_INFO\n        # the state-request command should be sent here (?)\n        #\n        #\n\n        states_of_pins = self.__get_info_from_mc()\n        self.checkBox_pin_1.setCheckState(states_of_pins[0])\n        self.checkBox_pin_2.setCheckState(states_of_pins[1])\n        self.checkBox_pin_3.setCheckState(states_of_pins[2])\n    \n\n
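    # pin-command protocol: the (PinState, pin number) pair indexes COMMANDS, and the MCU is expected to reply \"Command is correct\" on success (see __get_info_from_mc)\n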
    def __change_pin_state(self, pinNumber, newPinState):\n        command = COMMANDS.get(tuple([newPinState, pinNumber])).encode('utf-8')\n        #print(command) # temporary, for debugging\n        self.type_of_expected_mc_signal = ExpectedSignalType.WAITING_COMMAND_RESULT\n        self.serial.write(command)\n        self.serial.waitForBytesWritten()\n        if (not self.__get_info_from_mc()): # revert the checkbox state if the command failed\n            print(\"Произошла ошибка\") # temporary, for debugging (or a message box)\n            if (pinNumber == 1):\n                self.checkBox_pin_1.setCheckState((newPinState.value + 2) % 3)\n            elif (pinNumber == 2):\n                self.checkBox_pin_2.setCheckState((newPinState.value + 2) % 3)\n            elif (pinNumber == 3):\n                self.checkBox_pin_3.setCheckState((newPinState.value + 2) % 3)\n\n\n    def __show_program_info(self):\n        creators_info = QMessageBox()\n        creators_info.setWindowTitle(\"О программе\")\n        creators_info.setText(\"Создатели:\\nCтуденты группы АБ-920\\nПольщиков Г.А., Попова Ю.А., Посуконько О.А., Репин С.Е.\")\n        creators_info.exec_()\n\n\n    def __open_portSelection_window(self, mainWindow):\n        self.portSelection = QtWidgets.QMainWindow()\n        self.ui = Ui_portSelection()\n        self.ui.setupUi(self.portSelection)\n        self.portSelection.show()\n        self.serial.close() # close the COM port\n        mainWindow.close()\n\n\nif __name__ == \"__main__\":\n    app = QtWidgets.QApplication(sys.argv)\n    startWindow = QtWidgets.QWidget()\n    ui = Ui_portSelection()\n    ui.setupUi(startWindow)\n    startWindow.show()\n    sys.exit(app.exec_())\n","repo_name":"proximummax/BestProject","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} {"seq_id":"37289405367","text":"import json\n\nDIV_FAC = 1000.0 * 1000.0\n\nwith open('example.json','r') as j:\n    #ex = [val for val in json.loads(j.read()) if \"args\" in val and \"data\" in val[\"args\"] and \"functionName\" in val[\"args\"][\"data\"] and \"tdur\" in val and \"name\" in val and val[\"name\"] == \"FunctionCall\"]\n    #fns = set([x['args']['data']['functionName'] for x in ex])\n    #print(fns)\n    #print(len(ex))\n    #print(sum([x[\"tdur\"] for x in ex],0) / DIV_FAC)\n    ex = [val for val in json.loads(j.read()) if \"args\" in val and \"data\" in val[\"args\"] and \"cpuProfile\" in val[\"args\"][\"data\"] and \"nodes\" in val[\"args\"][\"data\"]['cpuProfile']]\n    print(len(ex))\n    allNodes = sum([val[\"args\"][\"data\"]['cpuProfile']['nodes'] for val in ex],[])\n    print(len(allNodes))\n    ","repo_name":"mikesol/munge","sub_path":"explore.py","file_name":"explore.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} {"seq_id":"43756975144","text":"\nimport Problem\nimport Tests\nimport logging\nimport pandas\n\n\n\n# def main():\n\nlogging.basicConfig(level=logging.INFO)\n\nfname = 'SupplyChain_SimpleProblem.xlsx'\nlogging.info('STARTING')\n\nlogging.info('=='*10)\nlogging.info('CONFIGURING PARAMETERS')\nparameters = Problem.get_parameters(fname)\n# print(parameters)\n\nlogging.info('=='*10)\n\n# Run Solver\nsolver = Problem.SupplyChainProblem()\nsolver.configure(parameters)\nsolution_status = solver.run()\n\n# Check solver status\nif not solution_status:\n    raise Exception('Solution not found!')\n\n# Print output summary\n# solver.print_summary()\npandas.options.display.max_rows = solver._tables['summary'].shape[0] \npandas.options.display.max_columns = 
solver._tables['summary'].shape[1]\nprint(solver._tables['summary'])\nprint(solver._tables['summary'].sum(axis=0))\n\n\n\nsolver.export_results()\n\n\n\n","repo_name":"tciodaro/experimentos","sub_path":"SupplyChain/.ipynb_checkpoints/run_simpleProblem-checkpoint.py","file_name":"run_simpleProblem-checkpoint.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30040283076","text":"#utilizando for y if este programa recibe palabras y muestra las que empiezan por cierta letra\ncantidad = int(input(\"ingrese la cantidad de palabras:\"))\nlistap = []\nfor i in range(cantidad):\n palabra=str(input(\"ingrese una palabra:\"))\n listap.append(palabra)\ninicial = str(input(\"letra inicial:\"))\n\npalabracorrecta = []\nfor palabra in listap:\n if palabra.startswith(inicial):\n palabracorrecta.append(palabra)\nprint(palabracorrecta)","repo_name":"KeviinSerna/Tareas-Kevin","sub_path":"listadepalabras.py","file_name":"listadepalabras.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42427877","text":"import cv2\nimport numpy as np\n\nsrc = cv2.imread(r\"11.jpg\")\n\ndst = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)\n\nlower = np.array([100, 160, 100])\nupper = np.array([250, 255, 200])\n\nmask = cv2.inRange(dst, lower, upper)\nres = cv2.bitwise_and(src, src, mask=mask)\n\ncv2.imshow(\"mask\", mask)\ncv2.imshow(\"img\", src)\ncv2.imshow(\"res\", res)\ncv2.waitKey(0)\n","repo_name":"852251748/practiceCode","sub_path":"Openstudy/HSV.py","file_name":"HSV.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42123421540","text":"#importing all the required libraries\nimport streamlit as st\nimport time\nimport plotly.graph_objects as go\nimport pandas as pd\nimport numpy as np\nimport random as rd\nfrom tensorflow import keras\n\n\nmodel = keras.models.load_model('myModel.model')\n\nst.title(\"Real-Time Object \")\n\ndummy1 = pd.DataFrame()\ndummy2 = pd.DataFrame()\n\ndummy1['Value'] = []\ndummy2['Value'] = []\n\ndummy1['Time'] = []\ndummy2['Time'] = []\n\n\nplot_spot = st.empty()\ntext_box_info = st.empty()\nStatus = st.empty()\n\ndef make_chart(df1, df2, df3, df4, df5, df6, df7 ,df8, df9 , df10, df11,df12):\n fig = go.Figure()\n fig.add_trace(go.Scatter(x=df1['Time'], y=df1['Value'], mode='lines+markers' , name='P1'))\n \n fig.update_layout(width=900, height=570, xaxis_title='time',\n yaxis_title='Value')\n\n fig.add_trace(go.Scatter(x=df2['Time'], y=df2['Value'], mode='lines+markers' , name='P2'))\n \n fig.update_layout(width=800, height=550, xaxis_title='time',\n yaxis_title='Value')\n\n fig.add_trace(go.Scatter(x=df3['Time'], y=df3['Value'], mode='lines+markers', name='P3'))\n \n fig.update_layout(width=800, height=550, xaxis_title='time',\n yaxis_title='Value')\n\n fig.add_trace(go.Scatter(x=df4['Time'], y=df4['Value'], mode='lines+markers' , name='P4'))\n \n fig.update_layout(width=800, height=550, xaxis_title='time',\n yaxis_title='Value')\n\n fig.add_trace(go.Scatter(x=df5['Time'], y=df5['Value'], mode='lines+markers' , name='G1'))\n \n fig.update_layout(width=800, height=550, xaxis_title='time',\n yaxis_title='Value')\n\n fig.add_trace(go.Scatter(x=df6['Time'], y=df6['Value'], mode='lines+markers' , name='G2'))\n \n fig.update_layout(width=800, height=550, xaxis_title='time',\n yaxis_title='Value')\n\n 
fig.add_trace(go.Scatter(x=df7['Time'], y=df7['Value'], mode='lines+markers' , name='G3'))\n \n fig.update_layout(width=800, height=550, xaxis_title='time',\n yaxis_title='Value')\n\n fig.add_trace(go.Scatter(x=df8['Time'], y=df8['Value'], mode='lines+markers' , name='G4'))\n \n fig.update_layout(width=800, height=550, xaxis_title='time',\n yaxis_title='Value')\n\n\n fig.add_trace(go.Scatter(x=df9['Time'], y=df9['Value'], mode='lines+markers', name=\"Tau1\"))\n \n fig.update_layout(width=800, height=550, xaxis_title='time',\n yaxis_title='Value')\n\n\n fig.add_trace(go.Scatter(x=df10['Time'], y=df10['Value'], mode='lines+markers', name=\"Tau2\"))\n \n fig.update_layout(width=800, height=550, xaxis_title='time',\n yaxis_title='Value')\n\n\n fig.add_trace(go.Scatter(x=df11['Time'], y=df11['Value'], mode='lines+markers', name=\"Tau3\"))\n \n fig.update_layout(width=800, height=550, xaxis_title='time',\n yaxis_title='Value')\n\n\n fig.add_trace(go.Scatter(x=df12['Time'], y=df12['Value'], mode='lines+markers', name=\"Tau4\"))\n \n fig.update_layout(width=800, height=550, xaxis_title='time',\n yaxis_title='Value')\n\n st.write(fig)\n\n\ntau1 = pd.DataFrame({'Time' : [i for i in range(100000)] , \"Value\" : [rd.uniform(0.5,10) for i in range(100000)]})\ntau2 = pd.DataFrame({'Time' : [i for i in range(100000)] , \"Value\" : [rd.uniform(0.5,10) for i in range(100000)]})\ntau3 = pd.DataFrame({'Time' : [i for i in range(100000)] , \"Value\" : [rd.uniform(0.5,10) for i in range(100000)]})\ntau4 = pd.DataFrame({'Time' : [i for i in range(100000)] , \"Value\" : [rd.uniform(0.5,10) for i in range(100000)]})\n\np1 = pd.DataFrame({'Time' : [i for i in range(100000)] , \"Value\" : [rd.uniform(-2,-0.5) for i in range(100000)]})\np2 = pd.DataFrame({'Time' : [i for i in range(100000)] , \"Value\" : [rd.uniform(-2,-0.5) for i in range(100000)]})\np3 = pd.DataFrame({'Time' : [i for i in range(100000)] , \"Value\" : [rd.uniform(-2,-0.5) for i in range(100000)]})\np4 = pd.DataFrame({'Time' : [i for i in range(100000)] , \"Value\" : [rd.uniform(-2,-0.5) for i in range(100000)]})\n\ng1 = pd.DataFrame({'Time' : [i for i in range(100000)] , \"Value\" : [rd.uniform(0.05, 1.00) for i in range(100000)]})\ng2 = pd.DataFrame({'Time' : [i for i in range(100000)] , \"Value\" : [rd.uniform(0.05, 1.00) for i in range(100000)]})\ng3 = pd.DataFrame({'Time' : [i for i in range(100000)] , \"Value\" : [rd.uniform(0.05, 1.00) for i in range(100000)]})\ng4 = pd.DataFrame({'Time' : [i for i in range(100000)] , \"Value\" : [rd.uniform(0.05, 1.00) for i in range(100000)]})\n\n\ncounter = 0\n\nstart = 0\nend = 10\nfor i in range(10000):\n \n with plot_spot:\n make_chart(p1[start +counter : end + counter],\n p2[start +counter : end + counter],\n p3[start +counter : end + counter],\n p4[start +counter : end + counter],\n g1[start +counter : end + counter],\n g2[start +counter : end + counter],\n g3[start +counter : end + counter],\n g4[start +counter : end + counter],\n tau1[start +counter : end + counter],\n tau2[start +counter : end + counter],\n tau3[start +counter : end + counter],\n tau4[start +counter : end + counter])\n \n CurrentText = f''' \n\n Reaction Time:\n The reaction time of Generation Node: {tau1['Value'][counter]}\n The reaction time of participant 1: {tau2['Value'][counter]}\n The reaction time of participant 2: {tau3['Value'][counter]}\n The reaction time of participant 3: {tau4['Value'][counter]}\n \n Nominal Power Produced:\n Nominal Power Produced by Generation Node: {p1['Value'][counter]}\n Nominal Power Produced 
participant 1: {p2['Value'][counter]}\n Nominal Power Produced participant 2: {p3['Value'][counter]}\n Nominal Power Produced participant 3: {p4['Value'][counter]}\n\n Price elasticity coefficien:\n Price elasticity coefficient for each network Generation Node: {g1['Value'][counter]}\n Price elasticity coefficient for each network participant 1: {g2['Value'][counter]}\n Price elasticity coefficient for each network participant 2: {g3['Value'][counter]}\n Price elasticity coefficient for each network participant 3: {g4['Value'][counter]}\n '''\n \n with Status:\n y_pred = model.predict(\n [[ tau1['Value'][counter], tau2['Value'][counter], tau3['Value'][counter], tau4['Value'][counter],\n p1['Value'][counter], p2['Value'][counter], p3['Value'][counter], p4['Value'][counter],\n g1['Value'][counter], g2['Value'][counter], g3['Value'][counter], g4['Value'][counter] ]]\n )[0][0]\n if y_pred == 0:\n st.warning(f'Current ID: {round(rd.random()*10000)} \\n Your grid is currently stable 🔥')\n else:\n st.success(f'Current ID: {round(rd.random()*10000)} \\n Your grid currently unstable 🔑')\n\n\n with text_box_info:\n st.write(\"Current Parameter Values\",CurrentText)\n\n\n \n counter = counter + 1\n\n time.sleep(0.5)","repo_name":"chethanreddy123/Grid-Failure-Using-NN","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28896406401","text":"#encoding:utf-8\r\n# 记载所需模块\r\nimport matplotlib\r\nmatplotlib.use(\"Agg\")\r\nfrom sklearn.preprocessing import LabelBinarizer\r\nfrom pyimagesearch.nn.conv import resnet\r\nfrom pyimagesearch.callbacks import epochcheckpoint as EPO\r\nfrom pyimagesearch.callbacks import trainingmonitor as TM\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.datasets import cifar10\r\nfrom keras.optimizers import SGD\r\nfrom keras.models import load_model\r\nimport keras.backend as K\r\nimport numpy as np\r\nimport argparse\r\n\r\n# 解析命令行参数\r\nap = argparse.ArgumentParser()\r\nap.add_argument('-c','--checkpoints',required=True,help='path to output checkpoint directory')\r\nap.add_argument('-m','--model',type=str,help='path to *specific* model checkpoint to load')\r\nap.add_argument('-s','--start_epoch',type=int,default =0,help='epoch to restart training as ')\r\nargs = vars(ap.parse_args())\r\n\r\n# 加载train和test数据集\r\nprint('[INFO] loading CIFAR-10 data...')\r\n((trainX,trainY),(testX,testY)) = cifar10.load_data()\r\n# 转化为float\r\ntrainX = trainX.astype(\"float\")\r\ntestX = testX.astype(\"float\")\r\n# 计算RGB通道均值\r\nmean = np.mean(trainX,axis =0)\r\n# 零均值化\r\ntrainX -= mean\r\ntestX -= mean\r\n\r\n# 标签编码处理\r\nlb = LabelBinarizer()\r\ntrainY = lb.fit_transform(trainY)\r\ntestY = lb.fit_transform(testY)\r\n\r\n# 数据增强\r\naug = ImageDataGenerator(width_shift_range = 0.1,\r\n height_shift_range = 0.1,\r\n horizontal_flip = True,\r\n fill_mode='nearest')\r\n# 若未指定checkpoints模型,则直接初始化模型\r\nif args['model'] is None:\r\n print(\"[INFO] compiling model...\")\r\n opt = SGD(lr=1e-1)\r\n model = resnet.ResNet.build(32,32,3,10,(9,9,9),(64,64,128,256),reg=0.0005)\r\n model.compile(loss='categorical_crossentropy',optimizer=opt,metrics = ['accuracy'])\r\n# 否则从磁盘中加载checkpoints模型\r\nelse:\r\n print(\"[INFO] loading {}...\".format(args['model']))\r\n model = load_model(args['model'])\r\n # 更新学习率\r\n print(\"[INFO] old learning rate: {}\".format(K.get_value(model.optimizer.lr)))\r\n K.set_value(model.optimizer.lr,1e-5)\r\n print(\"[INFO] 
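The stability branch above compares the raw model.predict output to 0, which only fires on an exact zero; with a continuous (e.g. sigmoid) output a threshold comparison is the usual pattern. A minimal sketch, where the single-unit output and the 0.5 cutoff are assumptions:

import numpy as np

def is_stable(model, features, threshold=0.5):
    # features: the 12 floats (tau1-4, p1-4, g1-4) fed to the network above.
    prob = model.predict(np.array([features]), verbose=0)[0][0]
    return prob < threshold   # class 0 = stable under the record's labelling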
new learning rate: {}\".format(K.get_value(model.optimizer.lr)))\r\n# 回调函数列表\r\ncallbacks = [\r\n # checkpoint\r\n EPO.EpochCheckpoint(args['checkpoints'],every = 5,startAt = args['start_epoch']),\r\n # 监控训练过程\r\n TM.TrainingMonitor(\"output/resnet56_cifar10.png\",\r\n jsonPath=\"output/resnet56_cifar10.json\",\r\n startAt = args['start_epoch'])\r\n]\r\n# 训练网络\r\nprint(\"[INFO] training network.....\")\r\nmodel.fit_generator(\r\n aug.flow(trainX,trainY,batch_size=128),\r\n validation_data = (testX,testY),\r\n steps_per_epoch = len(trainX) // 128,\r\n epochs = 10,\r\n callbacks = callbacks,\r\n verbose =1\r\n)","repo_name":"lonePatient/Deep_Learning_For_Computer_Vision_With_Python","sub_path":"chapter_12/resnet_tiny_imagenet/resnet_cifar10.py","file_name":"resnet_cifar10.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"52"} +{"seq_id":"41282333856","text":"import pyglet\r\nimport random\r\nfrom pyglet.window import mouse\r\n\r\nwindow = pyglet.window.Window(1024, 768, resizable=True)\r\n\r\nblue = pyglet.image.load('blue.png')\r\nblue.width = 162\r\nblue.height = 162\r\n\r\nyellow = pyglet.image.load('red.png')\r\nyellow.width = 162\r\nyellow.height = 162\r\n\r\nred = pyglet.image.load('yellow.png')\r\nred.width = 162\r\nred.height = 162\r\n\r\nbatch = pyglet.graphics.Batch()\r\ncolor_bubbles = [blue, yellow, red]\r\ndirection_list = []\r\nbir = []\r\nbubble_list = []\r\ncounter = 0\r\ncounter_update = 0\r\n\r\ndef new_bubble(x, y):\r\n\tbubble = pyglet.sprite.Sprite(random.choice(color_bubbles), batch=batch)\r\n\tbubble.position = (x - 81, y - 81)\r\n\tbubble_list.append(bubble)\r\n\tdir = [-100,100]\r\n\tdirection_list.append(dir)\r\n\r\n\t\r\ndef update(dt):\r\n\tfor i in range(len(bubble_list)):\r\n\t\tbubble_list[i].x += direction_list[i][0]*dt\r\n\t\tbubble_list[i].y += direction_list[i][1]*dt\r\n\t\tif bubble_list[i].x == window.width - 162 or bubble_list[i].x == 0:\r\n\t\t\tdirection_list[i][0] *= -1\r\n\t\tif bubble_list[i].y == window.height - 162 or bubble_list[i].y == 0:\r\n\t\t\tdirection_list[i][1] *= -1\r\n\r\n\t\r\n\r\n\t\r\n\r\n@window.event()\r\ndef on_mouse_press(x, y, button, modifiers):\r\n\tif button == mouse.LEFT:\r\n\t\tnew_bubble(x, y)\r\n\r\n\r\n@window.event()\r\ndef on_draw():\r\n\twindow.clear()\r\n\tbatch.draw()\r\n\r\n\r\npyglet.clock.schedule_interval(update, 1/60)\r\npyglet.app.run()","repo_name":"Vickytor/codes","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71931657446","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun May 29 15:47:45 2016\r\n\r\n@author: krikunov\r\n\"\"\"\r\n\r\ndef timedelta_minutes(t1, t2):\r\n \"\"\"\r\n Calculates time delta betwee two datetime.time instances in minutes.\r\n If t1 > t2 then assumes that t1 is the previous day time instance. 
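The resnet_cifar10.py record above restarts training by deserializing a checkpoint and overwriting the optimizer's learning rate in place. That resume path in isolation, with a hypothetical checkpoint filename:

from keras.models import load_model
import keras.backend as K

model = load_model('checkpoints/epoch_25.hdf5')        # hypothetical path
print('resumed lr:', K.get_value(model.optimizer.lr))
K.set_value(model.optimizer.lr, 1e-5)                  # drop the LR before continuing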
\r\n \"\"\"\r\n if t1 > t2:\r\n td_h = 24 - t1.hour + t2.hour\r\n td_m = 0 - t1.minute + t2.minute\r\n else:\r\n td_h = t2.hour - t1.hour\r\n td_m = t2.minute - t1.minute\r\n \r\n return td_h * 60 + td_m","repo_name":"itmo-escience/APAnalysis","sub_path":"AG/Generator/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12448398530","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Editorial',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('content', models.TextField()),\n ],\n ),\n migrations.CreateModel(\n name='Issue',\n fields=[\n ('name', models.CharField(max_length=150)),\n ('number', models.IntegerField(serialize=False, primary_key=True)),\n ('start_date', models.DateField()),\n ('end_date', models.DateField()),\n ],\n ),\n migrations.CreateModel(\n name='Squiggle',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('event1', models.CharField(max_length=128)),\n ('date1', models.DateField()),\n ('event2', models.CharField(max_length=128, blank=True)),\n ('date2', models.DateField(null=True, blank=True)),\n ('embed_code', models.TextField(blank=True)),\n ],\n options={\n 'ordering': ['-date1'],\n },\n ),\n migrations.CreateModel(\n name='UpcomingSquiggle',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('event', models.CharField(max_length=128)),\n ('date', models.DateField()),\n ('last_date', models.DateField()),\n ('event_description', models.TextField(blank=True)),\n ],\n ),\n ]\n","repo_name":"badrihippo/sirius","sub_path":"issue/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28976974498","text":"f = open('file.txt', 'w')\nf.close\n# f.close는 열려 있는 파일 객체를 닫아 주는 역할(작업 종료)을 한다.\n# 파일 경로에 역슬래시(\\)를 사용할 땐 역슬래시를 두 개 사용하거나,\n# 문자열 앞에 r을 적어 줄바꿈 문자로 해석되지 않게 해야 한다.\n'''\nopen 함수를 통해 파일 생성 : 파일 객체 = open(파일 이름, 파일 열기 모드)\n\nr(읽기모드) : 파일을 읽기만 할 때 사용\nw(쓰기모드) : 파일에 내용을 쓸 때 사용\na(추가모드) : 파일의 마지막에 새로운 내용을 추가 시킬 때 사용\n'''\n\nf = open('newfile.txt', 'w')\nfor i in range(1, 11):\n data = f'{i}번째 줄입니다.\\n'\n f.write(data)\nf.close()\n\n\nf = open('c:/Users/김경모/Desktop/python/newfile.txt', 'r')\nline = f.readline() # 파일을 한 줄씩 읽는 함수\nprint(line)\nf.close()\nprint('\\n')\n\n\nf = open('c:/Users/김경모/Desktop/python/newfile.txt', 'r')\nwhile True: # readline 함수로 파일의 모든 줄을 읽는 경우\n line = f.readline()\n if not line:\n break\n print(line)\nf.close()\nprint('\\n')\n\n\nf = open('c:/Users/김경모/Desktop/python/newfile.txt', 'r')\nlines = f.readlines() # 파일의 모든 줄을 읽는 함수\nfor line in lines:\n line = line.strip() # 공백 제거\n print(line)\nf.close()\nprint('\\n')\n# readline과 readlines 헷갈리지 않기!\n\n\nf = open('c:/Users/김경모/Desktop/python/newfile.txt', 'r')\ndata = f.read() # 파일의 내용 전체를 문자열로 돌려주는 함수\ndata = data.strip()\nprint(data)\nf.close()\nprint('\\n')\n\n\nf = open('c:/Users/김경모/Desktop/python/newfile.txt', 'a')\nfor a in range(11, 21):\n data = (f'{a}번째 줄입니다.\\n')\n f.write(data)\nf.close()\n# 쓰기모드(w)로 파일을 열면 기존의 있던 내용이 삭제되므로,\n# 추가모드(a)로 파일을 
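The file.py tutorial in progress here walks through the r/w/a modes; the append step it is describing (the sentence continues just below) reduces to this pattern, shown with English sample text:

# 'a' keeps the existing contents and appends at the end; 'w' would truncate first.
with open('newfile.txt', 'a', encoding='utf-8') as f:
    for i in range(11, 21):
        f.write(f'line {i}\n')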
열어 내용을 작성한다.\n\n\nwith open('new.txt', 'w') as f:\n f.write('파이썬 공부 어렵다.')\nwith open('c:/Users/김경모/Desktop/python/new.txt', 'r') as f:\n data = f.read()\n print(data)\n# with문을 사용하면 with 블록을 벗어나는 순간 열린 파일 객체 f가 자동으로 close된다.\n","repo_name":"k-mozzi/Jump-to-Python","sub_path":"입출력/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15336909775","text":"# 默认参数\ndef power(x, n=2):\n return x ** n\n\nprint(power(2)) # => 4\nprint(power(2, 4)) # => 16\n\n\ndef add_end(L=[]):\n L.append('END')\n return L\n\nprint(add_end([1, 2, 3])) # => [1, 2, 3, 'END']\n# 默认参数一定指向不变对象\nprint(add_end()) # => ['END']\nprint(add_end()) # => ['END', 'END']\n\n\ndef add_end(L=None):\n if L is None:\n L = []\n L.append('END')\n return L\n\nprint(add_end([1, 2, 3])) # => [1, 2, 3, 'END']\n# 默认参数一定指向不变对象\nprint(add_end()) # => ['END']\nprint(add_end()) # => ['END']\n\n# 可变参数\n\n\ndef calc(*args):\n x = []\n y = []\n print(type(args)) # => \n for n in args:\n if isinstance(n, (int, float)):\n x.append(n)\n if isinstance(n, str):\n y.append(n)\n return x, y\n\nnumberList, stringList = calc(1, 2, '3', 4, '5')\nprint(numberList, stringList) # => [1, 2, 4] ['3', '5']\n\n\n# 关键字参数\ndef person(name, age, **kw):\n print(type(kw)) # => \n print('city' in kw)\n print('name:', name, ',age:', age, ',other:', kw)\n\nperson('Jack', 22, city='chengdu')\n\nother = {'City': 'Asla', 'Gender': 'Man'}\nperson('Bluce', 32, city=other['City'], gender=other['Gender'])\n\n\ndef person(*args, **kw):\n s = ''\n for arg in args:\n if isinstance(arg, (int, float)):\n arg = str(arg)\n s += arg + ', '\n s += '{'\n for key in kw:\n s += key + ': ' + kw[key] + ', '\n s += '}'\n print(s)\n\nperson('Jack', 22, '180cm', city='chengdu', gender='man')\n\n\n# 小结\n\n# Python的函数具有非常灵活的参数形态,既可以实现简单的调用,又可以传入非常复杂的参数。\n# 默认参数一定要用不可变对象,如果是可变对象,程序运行时会有逻辑错误!\n# 要注意定义可变参数和关键字参数的语法:\n# *args是可变参数,args接收的是一个tuple;\n# **kw是关键字参数,kw接收的是一个dict。\n# 以及调用函数时如何传入可变参数和关键字参数的语法:\n# 可变参数既可以直接传入:func(1, 2, 3),又可以先组装list或tuple,再通过*args传入:func(*(1, 2, 3));\n# 关键字参数既可以直接传入:func(a=1, b=2),又可以先组装dict,再通过**kw传入:func(**{'a': 1, 'b': 2})。\n# 使用*args和**kw是Python的习惯写法,当然也可以用其他参数名,但最好使用习惯用法。\n# 命名的关键字参数是为了限制调用者可以传入的参数名,同时可以提供默认值。\n# 定义命名的关键字参数在没有可变参数的情况下不要忘了写分隔符*,否则定义的将是位置参数。\n","repo_name":"zenyuca/study-python3","sub_path":"syntax/function/Parameter.py","file_name":"Parameter.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"29439643654","text":"from jnius import autoclass, cast\n\n# Gets the current running instance of the app so as to speak\nmActivity = autoclass(\"org.kivy.android.PythonActivity\").mActivity\n\ncontext = mActivity.getApplicationContext()\nContext = autoclass(\"android.content.Context\")\nIntent = autoclass(\"android.content.Intent\")\nPendingIntent = autoclass(\"android.app.PendingIntent\")\nString = autoclass(\"java.lang.String\")\nInt = autoclass(\"java.lang.Integer\")\n\n\nintent = Intent()\nintent.setClass(context, Notify)\n# Here change the \"org.org.test\" to whatever package domain you have set.\n# Here my buildozer file has the package domain of \"org.test\".\n# After that \"NOTIFY\" is the custom action we have set. This custom action is\n# also defined in the manifest file(check README file). 
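The Parameter.py summary just above notes that a tuple can be expanded into positional arguments with * and a dict into keyword arguments with **. Both call forms in one compact demonstration:

def func(a, b, c=0, *args, **kw):
    print(a, b, c, args, kw)

func(1, 2)                    # 1 2 0 () {}
func(*(1, 2, 3, 4))           # 1 2 3 (4,) {} - the tuple is unpacked positionally
func(1, 2, **{'x': 9})        # 1 2 0 () {'x': 9} - the dict becomes keyword args
func(*(1, 2, 3, 4), **{'x': 9, 'y': 10})   # both at once: c = 3, args = (4,)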
You can use any name here\n# just make sure you use the same name while registering the action event\nintent.setAction(\"org.org.test.NOTIFY\")\n# Create a pending intent to be fired later in time by the alarm Manager\n# Here the intent_id is a variable holding a numeric value that uniquely identifies the\n# pending intent. Keep this id so that you can cancel scheduled alarms later on.\n\n# There are various types of pending intent flags that can be set based on what you want.\n# Here the `FLAG_CANCEL_CURRENT` will cancel any other pending intent with the same id before\n# setting itself.\npending_intent = PendingIntent.getBroadcast(\n context, intent_id, intent, PendingIntent.FLAG_CANCEL_CURRENT\n)\n\n# This gets the current system time since epoch in milliseconds(works only in python 3.7+)\nring_time = time.time_ns() // 1_000_000\n# We now create the alarm and assign it to the system alarm manager. Some methods assign\n# an alarm manager instance to a variable and then scheduling a task. But if you need to\n# later cancel this alarm from another python file or from another launch of your app(as\n# every time you relaunch a kivy the app ,the code is rerun thus creating a new instance of\n# the alarm manager rather than the one we used before to schedule the alarm). THIS IS IMPORTANT\n# AS WE NEED TO USE THE SAME ALARM MANAGER INSTANCE TO CANCEL AN ALARM\n\ncast(\n AlarmManager, context.getSystemService(Context.ALARM_SERVICE)\n).setExactAndAllowWhileIdle(AlarmManager.RTC_WAKEUP, ring_time, pending_intent)\n\n# Here we use RTC_WAKEUP which uses the real time of the device to figure out when to fire the alarm\n","repo_name":"Guhan-SenSam/Android-Notification-in-Python","sub_path":"Schedule Notifications/schedulenotif.py","file_name":"schedulenotif.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"52"} +{"seq_id":"40045751437","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 1 00:57:39 2019\n\n@author: swetu\n\"\"\"\n\nimport numpy as np\nimport re\nimport pickle\nimport nltk\nfrom nltk.corpus import stopwords\nfrom sklearn.datasets import load_files\nnltk.download('stopwords')\n\n#importing datasets\n\nreviews = load_files('txt_sentoken/')\nX,y = reviews.data,reviews.target\n\n#persisting data\n#Storing as pickle Files\nwith open('X','wb') as f:\n pickle.dump(X,f)\n\nwith open('y.pickle','wb') as f:\n pickle.dump(y,f)\n \n#unpickling the dataset\n \nwith open('X.pickle','rb') as f:\n X = pickle.load(f)\n\nwith open('y.pickle','rb') as f:\n y = pickle.load(f) \n \n# preprocessing the dataset\n#creating the corpus\ncorpus = []\nfor i in range(0,len(X)): \n review = re.sub(r'\\W',' ',str(X[i])) \n review = review.lower()\n review = re.sub(r'\\s+[a-z]\\s+',' ',review)\n review = re.sub(r'^[a-z]\\s+',' ',review)\n review = re.sub(r'\\s+',' ',review)\n corpus.append(review)\n \n\n#Transforming Data into BOW model\n\n\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nvectorizer = TfidfVectorizer(max_features = 2000,min_df = 3,max_df = 0.6,stop_words= stopwords.words('english')) \nX = vectorizer.fit_transform(corpus).toarray()\n\n\n#training our model\nfrom sklearn.model_selection import train_test_split\ntext_train,text_test,sent_train,sent_test = train_test_split(X,y,test_size=0.2,random_state = 0)\n \nfrom sklearn.linear_model import LogisticRegression\nclassifier = LogisticRegression()\nclassifier.fit(text_train,sent_train)\n\n#Testing model 
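The scheduling record above relies on rebuilding an identical PendingIntent (same intent, same intent_id) to manage an alarm after the fact. Cancelling follows directly from that; a sketch in the record's pyjnius style, reusing its context/Notify/intent_id handles:

AlarmManager = autoclass("android.app.AlarmManager")

# Rebuild a PendingIntent equal to the scheduled one, then cancel the alarm with it.
intent = Intent()
intent.setClass(context, Notify)
intent.setAction("org.org.test.NOTIFY")
pending_intent = PendingIntent.getBroadcast(
    context, intent_id, intent, PendingIntent.FLAG_CANCEL_CURRENT
)
cast(AlarmManager, context.getSystemService(Context.ALARM_SERVICE)).cancel(pending_intent)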
performance\nsent_pred = classifier.predict(text_test)\n\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(sent_test,sent_pred)\n \n\n","repo_name":"ShwethaDeepak/Text-Classification","sub_path":"Text_Classification.py","file_name":"Text_Classification.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38400542060","text":"import yaml\nimport json\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nfrom torch.utils.data import DataLoader\nimport os\nfrom tqdm import tqdm\n\nfrom network.deep_sdf_network import Decoder\nfrom utils.sampling import SDF_Dataset\n\n\ndef get_args():\n parser = argparse.ArgumentParser(description=\"Training Deep SDF\")\n parser.add_argument(\n \"--config\",\n dest=\"config\",\n required=True,\n help=\"Path to config file\"\n )\n\n parser.add_argument(\n \"--output\", \"-o\",\n dest=\"save_dir\",\n default=os.path.join(os.path.dirname(__file__), \"results\", \"checkpoints\"),\n help=\"Path to directory where checkpoints are saved\"\n )\n\n parser.add_argument(\n \"--visualize\", \"-v\",\n dest=\"visualize\",\n action=\"store_true\",\n help=\"Flag for visualization of loss\"\n )\n\n parser.add_argument(\n \"--no-save\",\n dest=\"save\",\n action=\"store_false\",\n help=\"Flag for not saving trained models/latent vectors\"\n )\n\n args = parser.parse_args()\n return args\n\ndef save_model(directory, filename, model, epoch):\n torch.save(model.state_dict(), os.path.join(directory, filename+\"%d.pth\" % (epoch+1)))\n\ndef main(args, save_dir, visualize, save_models):\n # Parse provided specs\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") \n latent_dim = args['latent_dim']\n network_kwargs = args['network_specs']\n n_epochs = args['epochs']\n n_subsamples = args['samples_per_scene'] if 'samples_per_scene' in args else None\n\n # Make save directories\n os.makedirs(save_dir, exist_ok=True)\n os.makedirs(os.path.join(save_dir, \"decoder\"), exist_ok=True)\n os.makedirs(os.path.join(save_dir, \"latent_vecs\"), exist_ok=True)\n\n with open(args['train_test_split'], 'r') as file:\n training_paths = json.load(file)['train']\n training_paths = [os.path.join(args['data_dir'], path) for path in training_paths]\n\n # Create dataset and dataloader\n dataset = SDF_Dataset(training_paths, n_subsamples, device)\n data_loader = DataLoader(dataset, batch_size=args['batch_size'], shuffle=True)\n\n # Initialize decoder from provided specs\n decoder = Decoder(latent_dim, **network_kwargs).to(device)\n\n # Create latent vectors and initialize weights\n latent_vecs = torch.nn.Embedding(len(dataset), latent_dim, max_norm=args['latent_vec_bound'], device=device)\n torch.nn.init.normal_(latent_vecs.weight.data, mean=0.0, std=1.0 / np.sqrt(latent_dim))\n\n l1_loss = torch.nn.L1Loss(reduction=\"sum\")\n\n # Initialize optimizer for decoder and latent vectors with provided LR schedule\n optimizer = torch.optim.Adam(\n [\n {\n \"params\": decoder.parameters(),\n \"lr\": args['network_lr_schedule']['start']\n },\n {\n \"params\": latent_vecs.parameters(),\n \"lr\": args['latent_vec_lr_schedule']['start']\n }\n ]\n )\n\n if visualize:\n loss_log = np.zeros((n_epochs,))\n\n decoder.train()\n pbar = tqdm(total=n_epochs, desc=\"Deep SDF Training\")\n for epoch in range(n_epochs):\n # Updates learning rates according to specified schedule\n optimizer.param_groups[0]['lr'] = args['network_lr_schedule']['start'] * 
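Text_Classification.py stops at the confusion matrix; an accuracy figure and persistence of the fitted artifacts follow the same sklearn/pickle conventions the record already uses (file names here are illustrative):

from sklearn.metrics import accuracy_score
import pickle

print(accuracy_score(sent_test, sent_pred))   # fraction of correct predictions

# Persist the classifier together with the fitted vectorizer;
# one is useless without the other at inference time.
with open('classifier.pickle', 'wb') as f:
    pickle.dump(classifier, f)
with open('tfidf_vectorizer.pickle', 'wb') as f:
    pickle.dump(vectorizer, f)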
args['network_lr_schedule']['decay'] ** (epoch // args['network_lr_schedule']['interval'])\n optimizer.param_groups[1]['lr'] = args['latent_vec_lr_schedule']['start'] * args['latent_vec_lr_schedule']['decay'] ** (epoch // args['latent_vec_lr_schedule']['interval'])\n\n for data, obj_idxs in data_loader:\n data.requires_grad = False\n data = data.reshape(-1, 4) # stack inputs into 2D\n points = data[:,:3] # get [x y z] coordinates\n sdf_true = torch.clamp(data[:,-1], -args['sdf_clamping_dist'], args['sdf_clamping_dist']).unsqueeze(1) # get ground truth SDF value and clamp between provided values\n n_samples = data.shape[0]\n\n # chunk data\n points = torch.chunk(points, args['batch_split']) # (batch_split, n, 3)\n sdf_true = torch.chunk(sdf_true, args['batch_split']) # (batch_split, n, 1)\n obj_idxs = torch.chunk(obj_idxs.to(device).unsqueeze(-1).repeat(1, n_subsamples).view(-1), args['batch_split'])\n\n batch_loss = 0.0\n optimizer.zero_grad()\n # Iterate through minibatches\n for i in range(args['batch_split']):\n batch_latent_vecs = latent_vecs(obj_idxs[i])\n inp = torch.cat((batch_latent_vecs, points[i]), dim=1) # stack latent vectors and 3D coordinates\n\n # Forward pass\n sdf_pred = decoder(inp)\n sdf_pred = torch.clamp(sdf_pred, -args['sdf_clamping_dist'], args['sdf_clamping_dist']) # clamp predictions to match GT\n \n # Compute L1 Loss\n batch_split_loss = l1_loss(sdf_pred, sdf_true[i]) / n_samples\n\n # Compute regularization loss on latent vectors\n if args['latent_vec_regularization']:\n latent_vec_reg_loss = args['latent_vec_reg_lambda'] * min(1, epoch/100) * torch.sum(torch.norm(batch_latent_vecs, p=2, dim=1)) / n_samples\n batch_split_loss += latent_vec_reg_loss\n\n # Do backprop\n batch_split_loss.backward()\n batch_loss += batch_split_loss\n\n if visualize:\n loss_log[epoch] = batch_loss.detach().cpu() / args['batch_split']\n\n # Gradient descent\n optimizer.step()\n\n # Save checkpoints at specified frequencies\n if (epoch+1) % args['save_freq'] == 0 and save_models:\n save_model(os.path.join(save_dir, \"decoder\"), \"decoder\", decoder, epoch)\n save_model(os.path.join(save_dir, \"latent_vecs\"), \"latent_vecs\", latent_vecs, epoch)\n\n pbar.update(1)\n\n if visualize:\n plt.plot(np.arange(n_epochs), loss_log, 'b-')\n plt.xlabel(\"epoch\")\n plt.ylabel(\"loss\")\n plt.show()\n\nif __name__ == \"__main__\":\n args = get_args()\n with open(args.config, 'r') as file:\n config_args = yaml.safe_load(file)\n\n main(config_args, args.save_dir, args.visualize, args.save)","repo_name":"sarveshmayil/deepsdf","sub_path":"train_deep_sdf.py","file_name":"train_deep_sdf.py","file_ext":"py","file_size_in_byte":6303,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"16546890779","text":"from rest_framework import serializers\n\nfrom . import models\nfrom . 
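Both parameter groups in the Deep SDF trainer decay as start * decay ** (epoch // interval), i.e. a piecewise-constant step schedule. Isolated, with illustrative numbers rather than the repo's defaults:

def step_decay(start, decay, interval, epoch):
    # Multiply by 'decay' once every 'interval' epochs; constant in between.
    return start * decay ** (epoch // interval)

for epoch in (0, 499, 500, 1000):
    print(epoch, step_decay(5e-4, 0.5, 500, epoch))
# 0 0.0005 / 499 0.0005 / 500 0.00025 / 1000 0.000125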
import constants\n\npriority_rev_dict = {v:k for k, v in constants.PRIORITY_CHOICES}\n\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.User\n fields = '__all__'\n\n\nclass TagSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Tag\n fields = '__all__'\n\n\nclass TicketSerializer(serializers.ModelSerializer):\n tag = serializers.CharField(required=False)\n\n def create(self, data):\n if data.get('tag'):\n tag_name = data.get('tag').lower()\n tag, created = models.Tag.objects.get_or_create(\n name=tag_name)\n data['tag'] = tag\n data = super(\n TicketSerializer, self).create(data)\n return data\n \n def update(self, data):\n if data.get('tag'):\n tag_name = data.get('tag').lower()\n tag, created = models.Tag.objects.get_or_create(\n name=tag_name)\n data['tag'] = tag\n data = super(\n TicketSerializer, self).update(data)\n return data\n \n def to_representation(self, instance):\n tag = instance.tag\n data = super(\n TicketSerializer, self).to_representation(instance)\n if tag and data.get('tag'):\n data['tag'] = tag.name\n else:\n data['tag'] = ''\n data['priority'] = priority_rev_dict.get(data.get('priority'), 'Normal')\n return data\n\n class Meta:\n model = models.Ticket\n exclude = ('created', 'modified', 'assignee')\n","repo_name":"cholarajaa/protaskapi","sub_path":"ticket/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"30293390648","text":"from pyspark.sql.functions import col\nfrom geh_stream.codelists import SettlementMethod, MeteringPointType\n\n\n# VR.611\n#\n# The energy quantity for a E17 (consumption metering point) must be below 100.000 kwh for hour settled,\n# else generate an error message E90 is generated (according to VR 611). 
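priority_rev_dict above inverts choice pairs so stored values can be rendered with 'Normal' as the fallback, as in to_representation. The idiom in isolation, with hypothetical choices (the real PRIORITY_CHOICES constant is not shown):

PRIORITY_CHOICES = ((0, 'Low'), (1, 'Normal'), (2, 'High'))   # hypothetical values

rev = {v: k for k, v in PRIORITY_CHOICES}
print(rev)                           # {'Low': 0, 'Normal': 1, 'High': 2}
print(rev.get('Urgent', 'Normal'))   # missing keys fall back to 'Normal'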
This is per position.\n# The sender can choose not to assign a value to energy quantity, this is accepted.\n#\n# It is not necessary to check that the resolution is hourly because it is given when settlement method is non-profiled.\ndef validate_vr_611(df):\n consumptionLimit = 1E5 # 100.000 kWh\n\n return df \\\n .withColumn(\"VR-611-Is-Valid\",\n ~ # Negate the below expression to make it an is-valid instead of is-invalid\n (\n # Expression for the exact situation where the violation is determined to have occurred\n col(\"pd.series_point_quantity\").isNotNull()\n & (col(\"md.meteringPointType\") == MeteringPointType.consumption.value)\n & (col(\"md.settlementMethod\") == SettlementMethod.non_profiled.value)\n & (col(\"pd.series_point_quantity\") >= consumptionLimit)\n ))\n","repo_name":"Energinet-DataHub/ARCHIVED-geh-timeseries","sub_path":"obsolete/source/streaming/geh_stream/validation/rules/vr_611.py","file_name":"vr_611.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"34572475688","text":"#This program is used to find sum of list odd and even\n\n\ndef sums(num):\n sum1=0\n sum2=0\n\n for i in num:\n if(i%2==0):\n sum1+=i\n else:\n sum2+=i\n \n return sum1,sum2\n\n#main\nif __name__ == \"__main__\":\n lists = [0,1,2,3,4,5,6,7,8,9,10]\n even, odd = sums(lists)\n print(\"Odd valuse of list sum is: \", odd)\n print(\"Even valuse of list sum is: \", even)\n","repo_name":"KrishothKumar/Python_Practice","sub_path":"list_sum.py","file_name":"list_sum.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"75314314723","text":"import tkinter\nfrom tkinter.messagebox import showinfo as alert\nfrom tkinter.messagebox import askyesno as question\nfrom tkinter.simpledialog import askstring as prompt\nimport customtkinter\n\n\n'''\nnombre: Martín\napellido: Morales\n\nEnunciado:\nAl presionar el botón \"Mostrar Iteración\", mostrar mediante alert \n10 repeticiones con números ASCENDENTE desde el 1 al 10\n'''\n\n\nclass App(customtkinter.CTk):\n \n def __init__(self):\n super().__init__()\n\n self.title(\"UTN FRA\")\n \n self.btn_mostrar_iteracion = customtkinter.CTkButton(master=self, text=\"Mostrar iteración\", command=self.btn_mostrar_iteracion_on_click)\n self.btn_mostrar_iteracion.grid(row=2, pady=20, columnspan=2, sticky=\"nsew\")\n \n \n def btn_mostrar_iteracion_on_click(self):\n # ingreso = 1\n # ingreso = int(ingreso)\n\n # alert(\"Inicio\", \"Inicio\")\n # while ingreso != 10:\n # alert(\"Ingresado\", ingreso)\n # ingreso += 1\n \n # alert(\"Fin\", \"Fin\")\n\n\n # respuesta = True\n\n # acumulador_precios = 0 \n # alert(\"Incio\", \"Inicio\")\n\n # while respuesta:\n # precio = float(prompt(\"Ingreso\", \"Ingrese un numero\"))\n # contador_arg += 1\n # acumulador_precios += precio\n # respuesta = question(\"Pregunta\", \"Dese repetir?\")\n\n # alert(\"Fin\", \"Precio total: \" + str(acumulador_precios))\n\n # ingreso_numero = 1\n\n # while ingreso_numero != 10:\n # alert(title=\"Ingreso num\", message=ingreso_numero)\n # ingreso_numero +=1\n \n # alert(\"Fin\", \"FIn\")\n\n # contador = 1\n\n # while contador <= 10:\n # alert(title=\"Número\", message=contador)\n # contador = contador + 1\n \n\n #Clase Lune 17\n #ejemplo de prmedio de notas, etc\n contador = 0\n acumulador = 0\n numero = 1\n numero = int(prompt(\"Numero\", \"Ingrese un numero\"))\n #op1 != op2\n\n #not True = False\n 
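The VR.611 expression above marks a row invalid only when all four conditions line up at once. The same predicate written out in plain Python, with stand-in codes (E17 = consumption per the record's comment; the settlement value is an assumption):

LIMIT = 1e5   # 100 000 kWh

def vr_611_is_valid(quantity, mp_type, settlement):
    # Mirrors the Spark expression: invalid only for non-profiled E17 points >= the limit.
    invalid = (quantity is not None and mp_type == 'E17'
               and settlement == 'non_profiled' and quantity >= LIMIT)
    return not invalid

print(vr_611_is_valid(50_000.0, 'E17', 'non_profiled'))    # True: below the limit
print(vr_611_is_valid(150_000.0, 'E17', 'non_profiled'))   # False: violates VR.611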
#not False = True\n\n # \"Algo\" .isdigit() => True 0-9, de lo contrario false\n # str.isalpha() => True a-z A-Z, de lo contrario false\n # str.alnum() => True a-zA-Z\n\n while contador < 5 and numero != 0:\n \n acumulador += numero\n contador += 1\n \n print(\"La sumatoria es: \", str(acumulador))\n\n\n\n\n \nif __name__ == \"__main__\":\n app = App()\n app.mainloop()","repo_name":"Mrtin99/Clase_Python_UTN","sub_path":"04_instruccion_while/while_01.py","file_name":"while_01.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40788471292","text":"from django.http import HttpResponseRedirect\nfrom django_comments.models import Comment\nfrom blog.models import Blog\n\n\ndef comment_done(request):\n if request.GET['c']:\n comment_id = request.GET['c']\n comment = Comment.objects.get(pk=comment_id)\n blog = Blog.objects.get(id=comment.object_pk)\n if blog:\n return HttpResponseRedirect(blog.get_absolute_url())","repo_name":"workcode/simplelifetest","sub_path":"apps/custom_comments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4713497213","text":"\"\"\"\nHelpers for dealing with vectorized environments.\n\"\"\"\n\nfrom collections import OrderedDict\n\nimport gym\nimport numpy as np\n\n\ndef copy_obs_dict(obs):\n \"\"\"\n Deep-copy an observation dict.\n \"\"\"\n return {k: np.copy(v) for k, v in obs.items()}\n\n\ndef dict_to_obs(obs_dict):\n \"\"\"\n Convert an observation dict into a raw array if the\n original observation space was not a Dict space.\n \"\"\"\n if set(obs_dict.keys()) == {None}:\n return obs_dict[None]\n return obs_dict\n\n\ndef obs_space_info(obs_space):\n \"\"\"\n Get dict-structured information about a gym.Space.\n\n Returns:\n A tuple (keys, shapes, dtypes):\n keys: a list of dict keys.\n shapes: a dict mapping keys to shapes.\n dtypes: a dict mapping keys to dtypes.\n \"\"\"\n if isinstance(obs_space, gym.spaces.Dict):\n assert isinstance(obs_space.spaces, OrderedDict)\n subspaces = obs_space.spaces\n elif isinstance(obs_space, gym.spaces.Tuple):\n assert isinstance(obs_space.spaces, tuple)\n subspaces = {i: obs_space.spaces[i] for i in range(len(obs_space.spaces))}\n else:\n subspaces = {None: obs_space}\n keys = []\n shapes = {}\n dtypes = {}\n for key, box in subspaces.items():\n keys.append(key)\n shapes[key] = box.shape\n dtypes[key] = box.dtype\n return keys, shapes, dtypes\n\n\ndef obs_to_dict(obs):\n \"\"\"\n Convert an observation into a dict.\n \"\"\"\n if isinstance(obs, dict):\n return obs\n return {None: obs}\n","repo_name":"openai/baselines","sub_path":"baselines/common/vec_env/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":14949,"dataset":"github-code","pt":"52"} +{"seq_id":"21032350016","text":"\"\"\"\nLogs when a member leaves.\n\"\"\"\nfrom discord.ext import commands\n\nfrom zorak.utilities.cog_helpers._embeds import embed_leave # pylint: disable=E0401\n\n\nclass LoggingLeaving(commands.Cog):\n \"\"\"\n Simple listener to on_member_remove\n then checks the audit log for exact details\n \"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.Cog.listener()\n async def on_member_remove(self, member):\n \"\"\"\n First we don't log leaves for unapproved people.\n then we grab the guild, and from there read the last entry 
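obs_space_info above flattens a Dict observation space into parallel key/shape/dtype maps. A quick check with an illustrative gym space (gym.spaces.Dict sorts plain-dict keys alphabetically, so the keys here are already in order):

import gym
import numpy as np

space = gym.spaces.Dict({
    'goal': gym.spaces.Discrete(4),
    'image': gym.spaces.Box(low=0, high=255, shape=(84, 84, 3), dtype=np.uint8),
})
keys, shapes, dtypes = obs_space_info(space)   # helper from the snippet above
print(keys)     # ['goal', 'image']
print(shapes)   # {'goal': (), 'image': (84, 84, 3)}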
in the audit log.\n \"\"\"\n if \"Needs Approval\" in [role.name for role in member.roles]:\n return\n\n current_guild = self.bot.get_guild(self.bot.server_settings.server_info[\"id\"])\n audit_log = [entry async for entry in current_guild.audit_logs(limit=1)][0]\n\n if str(audit_log.action) != \"AuditLogAction.ban\" and str(audit_log.action) != \"AuditLogAction.kick\":\n embed = embed_leave(member)\n\n logs_channel = await self.bot.fetch_channel(self.bot.server_settings.log_channel[\"join_log\"])\n await logs_channel.send(embed=embed)\n\n\ndef setup(bot):\n \"\"\"\n Necessary for loading the cog into the bot instance.\n \"\"\"\n bot.add_cog(LoggingLeaving(bot))\n","repo_name":"practical-python-org/ZorakBot","sub_path":"src/zorak/cogs/logging/logging_member_leaving.py","file_name":"logging_member_leaving.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"52"} +{"seq_id":"40187094887","text":"from Deck_Of_Cards import Deck_Of_Cards\nfrom Player import Player\nclass Card_Game:\n \"\"\"Represent a card game between two Players\n each player has list of random cards\n the one who has more cards in his hand his the winner of th game\"\"\"\n def __init__(self,name_p1,name_p2,num_p1=26,num_p2=26):\n if type(name_p1)!=str:\n raise TypeError(\"Argument name_p1 must be str\")\n if type(name_p2)!=str:\n raise TypeError(\"Argument name_p2 must be str\")\n if type(num_p1)!=int:\n raise TypeError(\"Argument num_p1 must be int\")\n if num_p1<10 or num_p1>26:\n num_p1=26\n if type(num_p2)!=int:\n raise TypeError(\"Argument num_p2 must be int\")\n if num_p2<10 or num_p2>26:\n num_p2=26\n if num_p1>num_p2 or num_p2>num_p1:\n num_p2=num_p1\n self.player1=Player(name_p1,num_p1)\n self.player2=Player(name_p2,num_p2)\n self.deck_cards=Deck_Of_Cards()\n self.game=self.new_game()\n\n def new_game(self):\n \"\"\"Start a new game between 2 players only when the deck_cards is full- 52 cards\n and each player has 0 cards in his hand\"\"\"\n if len(self.deck_cards.cards) == 52 and len(self.player1.cards)==0 and len(self.player2.cards)==0:\n self.deck_cards.cards_shuffle()\n self.player1.set_hand(self.deck_cards)\n self.player2.set_hand(self.deck_cards)\n else:\n print(\"Error cant start new game\")\n\n def get_winner(self):\n \"\"\"Checks which player has more cards in his deck\n the player that has more cards his the winner of the game\"\"\"\n if len(self.player1.cards)>len(self.player2.cards):\n return f\"The winner of the game is: {self.player1.name.title()}\"\n if len(self.player2.cards)>len(self.player1.cards):\n return f\"The winner of the game is: {self.player2.name.title()}\"\n else:\n return None\n\n\n\n\n","repo_name":"Tomeriko990/project","sub_path":"game_cards/Card_Game.py","file_name":"Card_Game.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71040846244","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 15 15:51:49 2018\r\n\r\n@author: mlopes\r\n\"\"\"\r\n\r\n\r\nclass Node():\r\n def __init__(self, prob, parents = []):\r\n self.parents = parents\r\n self.prob = prob\r\n\r\n\r\n def computeProb(self, evid):\r\n if (self.parents == []):\r\n return [1 - self.prob[0], self.prob[0]]\r\n\r\n else:\r\n\r\n prob = self.prob\r\n\r\n for i in self.parents:\r\n prob = prob[evid[i]]\r\n\r\n return [1 - prob, prob]\r\n\r\n\r\nclass BN():\r\n def __init__(self, gra, prob):\r\n self.prob = prob\r\n self.gra = 
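Card_Game above collapses mismatched hand sizes to num_p1 and only declares a winner once the hands differ. A usage sketch, assuming Player.set_hand deals the requested number of cards as the names suggest:

game = Card_Game('alice', 'bob', num_p1=12)    # num_p2 is forced to 12 by the ctor
print(len(game.player1.cards), len(game.player2.cards))   # expected: 12 12
print(game.get_winner())                       # None while the hands are still equal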
gra\r\n\r\n def aux(self, evid, post, value, post_value):\r\n flag = 0\r\n for i in range(0, len(evid)):\r\n if evid[i] == [] :\r\n flag = 1\r\n for j in range(0, 2):\r\n evid_copy = evid.copy()\r\n evid_copy[i] = j\r\n value = self.aux(evid_copy, post, value, post_value)\r\n break\r\n if flag:\r\n return value\r\n else:\r\n evid[post] = post_value\r\n return value + self.computeJointProb(evid)\r\n\r\n\r\n def computePostProb(self, evid):\r\n post = -1\r\n for i in range(0, len(evid)):\r\n if(evid[i] == -1):\r\n post = i\r\n break\r\n\r\n #marginalizar\r\n izero = self.aux( list(evid), post, 0, 0)\r\n ione = self.aux( list(evid), post, 0, 1)\r\n\r\n #multiplicar pelo alpha\r\n res = (1/(izero + ione)) * ione\r\n\r\n return res\r\n\r\n\r\n def computeJointProb(self, evid):\r\n\r\n result = 1;\r\n #aplicar teorema de Bayes\r\n for i in range(0, len(evid)):\r\n result *= self.prob[i].computeProb(evid)[evid[i]]\r\n\r\n return result\r\n","repo_name":"carolinacarreira/AI-Project","sub_path":"BN.py","file_name":"BN.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30575208412","text":"'''\ncontrol.py\nCreated on Nov 15, 2015\n\n@author: prefalo\n'''\nimport random\nimport string\nimport time\nfrom queue import Queue\nfrom output import OutThread\nfrom worker import WorkerThread\n\nstart_time = time.time()\nWORKERS = 10\n\ninq = Queue(maxsize=int(WORKERS*1.5))\noutq = Queue(maxsize=int(WORKERS*1.5))\n\not = OutThread(WORKERS, outq)\not.start()\n\nfor i in range(WORKERS):\n w = WorkerThread(inq, outq)\n w.start()\n#instring = input(\"Words of wisdom: \")\ninstring = ''.join(random.choice(string.ascii_uppercase) for _ in range(1000))\nfor work in enumerate(instring):\n inq.put(work)\nfor i in range(WORKERS):\n inq.put(None)\ninq.join()\nprint(\"Control thread terminating\")\n\ndelta = time.time() - start_time\nprint(\"The script took --- %.5f seconds --- to run\" % delta)\n\nif __name__ == '__main__':\n pass\n\n ","repo_name":"paulrefalo/Python-2---4","sub_path":"workspace/Python4_Homework11/src/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70581119205","text":"import tensorflow as tf\nimport numpy as np\nimport time\nimport cv2\nimport random\n\nfrom setup_cifar import CIFAR, CIFARModel\nfrom setup_mnist import MNIST, MNISTModel\nfrom setup_inception2 import ImageNet, InceptionModel\n\nfrom l2_attack import CarliniL2\nfrom l0_attack import CarliniL0\nfrom li_attack import CarliniLi\n\n\ndef show(img):\n \"\"\"\n Show MNSIT digits in the console.\n \"\"\"\n remap = \" .*#\"+\"#\"*100\n img = (img.flatten()+.5)*3\n if len(img) != 784: return\n print(\"START\")\n for i in range(28):\n print(\"\".join([remap[int(round(x))] for x in img[i*28:i*28+28]]))\n\n\ndef generate_data(data, samples, targeted=True, start=0, inception=False):\n \"\"\"\n Generate the input data to the attack algorithm.\n\n data: the images to attack\n samples: number of samples to use\n targeted: if true, construct targeted attacks, otherwise untargeted attacks\n start: offset into data to use\n inception: if targeted and inception, randomly sample 100 targets intead of 1000\n \"\"\"\n inputs = []\n targets = []\n for i in range(samples):\n if targeted:\n if inception:\n seq = random.sample(range(1,1001), 10)\n else:\n seq = range(data.test_labels.shape[1])\n\n for j in seq:\n if (j == 
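The BN record above multiplies per-node conditionals for the joint probability and marginalizes the variable marked -1 for the posterior. A worked two-node check with invented CPTs, using the Node/BN classes as defined there:

# A -> B with P(A=1)=0.3, P(B=1|A=0)=0.2, P(B=1|A=1)=0.9 (numbers invented).
A = Node([0.3], [])
B = Node([0.2, 0.9], parents=[0])     # indexed by the evidence value of node 0

net = BN(gra=[[], [0]], prob=[A, B])
print(net.computeJointProb((1, 1)))   # 0.3 * 0.9 = 0.27
print(net.computePostProb((-1, 1)))   # P(A=1|B=1) = 0.27 / (0.27 + 0.7*0.2) ~ 0.659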
np.argmax(data.test_labels[start+i])) and (inception == False):\n continue\n inputs.append(data.test_data[start+i])\n targets.append(np.eye(data.test_labels.shape[1])[j])\n else:\n inputs.append(data.test_data[start+i])\n targets.append(data.test_labels[start+i])\n\n inputs = np.array(inputs)\n targets = np.array(targets)\n\n return inputs, targets\n\n\nif __name__ == \"__main__\":\n #with tf.Session() as sess:\n sess = tf.Session()\n #data, model = MNIST(), MNISTModel(\"models/mnist\", sess)\n #data, model = CIFAR(), CIFARModel(\"models/cifar\", sess)\n data = ImageNet()\n model = InceptionModel(sess)\n attack = CarliniL2(sess, model, batch_size=1, max_iterations=1000, confidence=0)\n #attack = CarliniL0(sess, model, max_iterations=1000, initial_const=10,\n # largest_const=15)\n label_name = []\n from scipy.misc import imread\n input1 = tf.placeholder(tf.float32, shape=(299,299,3))\n check1 = model.predict(input1)\n for i in range(9):\n \n print(\" 200\"+str(i))\n x1 = imread('C_W_data/200' + str(i) + '.png')\n x1 = np.array(x1, dtype = np.float32)\n x1 = x1 / 255\n x1 = x1 - .5\n \n #sess.run(tf.global_variables_initializer())\n A1 = sess.run(check1, feed_dict={input1:x1} )\n print(\"predict id: \", np.argmax(A1[0]) )\n label_name.append( np.argmax(A1[0]) )\n #tf.reset_default_graph()\n txt = open(\"output.txt\",\"w\")\n for i in range(9):\n txt.write(str(label_name[i]))\n txt.close()\n\n","repo_name":"KevinHuang841006/AI_Security_training","sub_path":"rescaling_defense/check_test.py","file_name":"check_test.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1554869708","text":"# for csv operation\nimport csv\n# to access images in folder\nimport glob\nimport tkinter as tk\n# for GUI\nfrom tkinter import *\n# for file dialog\nfrom tkinter import filedialog\n\n# for image operations\nimport cv2\n# for basic image processing\nimport imutils\n# for image operations to arrays\nimport numpy as np\n# image operations\nfrom PIL import ImageTk, Image\n\n\n\nclass ColorDescriptor:\n def __init__(self, bins):\n # store the number of bins for the 3D histogram\n self.bins = bins\n\n def describe(self, image):\n # convert the image to the HSV color space and initialize\n # the features used to quantify the image\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n features = []\n\n # grab the dimensions and compute the center of the image\n (h, w) = image.shape[:2]\n (cX, cY) = (int(w * 0.5), int(h * 0.5))\n\n # divide the image into four rectangles/segments (top-left,\n # top-right, bottom-right, bottom-left)\n segments = [(0, cX, 0, cY), (cX, w, 0, cY), (cX, w, cY, h),(0, cX, cY, h)]\n\n # construct an elliptical mask representing the center of the\n # image\n (axesX, axesY) = (int(w * 0.75) // 2, int(h * 0.75) // 2)\n ellipMask = np.zeros(image.shape[:2], dtype=\"uint8\")\n cv2.ellipse(ellipMask, (cX, cY), (axesX, axesY), 0, 0, 360, 255, -1)\n\n # loop over the segments\n for (startX, endX, startY, endY) in segments:\n # construct a mask for each corner of the image, subtracting\n # the elliptical center from it\n cornerMask = np.zeros(image.shape[:2], dtype=\"uint8\")\n cv2.rectangle(cornerMask, (startX, startY), (endX, endY), 255, -1)\n cornerMask = cv2.subtract(cornerMask, ellipMask)\n\n # extract a color histogram from the image, then update the\n # feature vector\n hist = self.histogram(image, cornerMask)\n features.extend(hist)\n\n # extract a color histogram from the elliptical region 
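check_test.py above maps 8-bit PNG pixels into the [-0.5, 0.5] range the Inception wrapper consumes. The forward and inverse transforms side by side, with a stand-in array instead of a real image:

import numpy as np

raw = np.array([0, 128, 255], dtype=np.float32)   # stand-in pixel values

x = raw / 255 - 0.5          # encode: [0, 255] -> [-0.5, 0.5]
back = (x + 0.5) * 255       # decode for saving or display
print(x)      # [-0.5  0.0019...  0.5]
print(back)   # [  0. 128. 255.]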
and\n # update the feature vector\n hist = self.histogram(image, ellipMask)\n features.extend(hist)\n\n # return the feature vector\n return features\n\n def histogram(self, image, mask):\n # extract a 3D color histogram from the masked region of the\n # image, using the supplied number of bins per channel\n hist = cv2.calcHist([image], [0, 1, 2], mask, self.bins,[0, 180, 0, 256, 0, 256])\n\n # normalize the histogram if we are using OpenCV 2.4\n if imutils.is_cv2():\n hist = cv2.normalize(hist).flatten()\n\n # otherwise handle for OpenCV 3+\n else:\n hist = cv2.normalize(hist, hist).flatten()\n\n # return the histogram\n return hist\n\n\nclass Searcher:\n def __init__(self, indexPath):\n # store our index path\n self.indexPath = indexPath\n\n def search(self, queryFeatures, limit=10):\n # initialize our dictionary of results\n results = {}\n\n # open the index file for reading\n with open(self.indexPath) as f:\n # initialize the CSV reader\n reader = csv.reader(f)\n\n # loop over the rows in the index\n for row in reader:\n # parse out the image ID and features, then compute the\n # chi-squared distance between the features in our index\n # and our query features\n features = [float(x) for x in row[1:]]\n d = self.chi2_distance(features, queryFeatures)\n\n # now that we have the distance between the two feature\n # vectors, we can udpate the results dictionary -- the\n # key is the current image ID in the index and the\n # value is the distance we just computed, representing\n # how 'similar' the image in the index is to our query\n results[row[0]] = d\n\n # close the reader\n f.close()\n\n # sort our results, so that the smaller distances (i.e. the\n # more relevant images are at the front of the list)\n results = sorted([(v, k) for (k, v) in results.items()])\n\n # return our (limited) results\n return results[:limit]\n\n def chi2_distance(self, histA, histB, eps=1e-10):\n # compute the chi-squared distance\n d = 0.5 * np.sum([((a - b) ** 2) / (a + b + eps)\n for (a, b) in zip(histA, histB)])\n\n # return the chi-squared distance\n return d\n\n\ndef open_folder():\n global folder_name\n global result_data\n global current_index\n result_data = []\n current_index = 0\n # To select dataset folder\n folder_name = filedialog.askdirectory()\n cd = ColorDescriptor((8, 12, 3))\n output = open(folder_name + \"/index.csv\", \"w\") # creating csv file\n filecount = 1\n for imagePath in glob.glob(folder_name + \"//\" \"*.*\"):\n if imagePath.lower().endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif')):\n # printing indexed images\n text_output.insert(1.0, str(filecount) + \" : \" + imagePath + \"\\n\")\n filecount += 1\n imageID = imagePath[imagePath.rfind(\"/\") + 1:]\n image = cv2.imread(imagePath)\n features = cd.describe(image)\n # write the features to file\n features = [str(f) for f in features]\n output.write(imagePath + \",\" + \",\".join(features) + \"\\n\")\n text_output.insert(1.0, \"Indexed images : \\n\")\n output.close()\n\n\ndef select_image():\n global image_file\n global result_data\n result_data = []\n # selecting image to search\n image_file = filedialog.askopenfilename(initialdir=\"C:\", title=\"Open image\",\n filetypes=((\"jpeg files\", \"*.jpg\"),(\"png files\", \"*.png\"), (\"all files\", \"*.*\")))\n # adding image to result file\n result_data.append(image_file)\n text_output.delete(1.0, END)\n # printing similar images on console\n text_output.insert(1.0, image_file)\n text_output.insert(1.0, \"Selected image : \")\n\n width = 400\n height = 400\n img = 
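Searcher.chi2_distance above scores two histograms as 0.5 * sum((a-b)^2 / (a+b+eps)). A hand-checkable version on tiny invented "histograms":

def chi2(histA, histB, eps=1e-10):
    return 0.5 * sum((a - b) ** 2 / (a + b + eps) for a, b in zip(histA, histB))

print(chi2([0.5, 0.5], [0.5, 0.5]))   # 0.0: identical histograms
print(chi2([1.0, 0.0], [0.0, 1.0]))   # ~1.0: 0.5 * (1/1 + 1/1)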
Image.open(image_file)\n img = img.resize((width, height), Image.ANTIALIAS)\n img1 = ImageTk.PhotoImage(img)\n image_panel.configure(image=img1)\n image_panel.image = img1\n\n\ndef search_similar():\n global image_file\n global result_data\n global folder_name\n global current_index\n\n # initialize the image descriptor\n cd = ColorDescriptor((8, 12, 3))\n\n current_index = 0\n text_output.delete(1.0, END)\n\n # load the query image and describe it\n query = cv2.imread(image_file)\n features = cd.describe(query)\n\n # perform the search\n searcher = Searcher(folder_name + \"/index.csv\")\n results = searcher.search(features)\n\n if (len(results) == 0):\n text_output.delete(1.0, END)\n text_output.insert(1.0, \"No similar images found...\")\n else:\n count = 1\n for (score, resultID) in results:\n # adding reults to the album\n result_data.append(resultID)\n text_output.insert(1.0, str(count) + \" \" + resultID + \"\\n\")\n count += 1\n\n width = 400\n height = 400\n img = Image.open(result_data[0])\n img = img.resize((width, height), Image.ANTIALIAS)\n img1 = ImageTk.PhotoImage(img)\n image_panel.configure(image=img1)\n image_panel.image = img1\n current_index = 1\n text_output.insert(1.0, \"Total \" + str(len(result_data) - 1) + \" similar images found \\n \")\n display()\n\n\ndef display():\n # function to display similar images\n width = 400\n height = 400\n if (len(result_data) > 1):\n # text_output.delete(1.0, END)\n # text_output.insert(1.0, str(len(result_data)))\n\n img = Image.open(result_data[current_index])\n img = img.resize((width, height), Image.ANTIALIAS)\n img1 = ImageTk.PhotoImage(img)\n image_panel.configure(image=img1)\n image_panel.image = img1\n\n\ndef position():\n # for result check\n if len(result_data) != 0:\n return TRUE\n else:\n return FALSE\n\n\ndef next():\n # function to display next image\n global current_index\n if position() and (current_index + 1 != len(result_data)):\n current_index += 1\n display()\n else:\n pass\n\n\ndef back():\n # function to display previous image\n global current_index\n if position() and current_index != 0:\n current_index -= 1\n display()\n else:\n pass\n\n\n\napp = Tk()\napp.geometry(\"1200x600\")\n\nresult_data = []\ncurrent_index = 0\napp.title(\"content based image retrieval\")\napp.resizable(0, 0)\n\n# frame to store button\nbutton_frame = tk.Frame(app, relief=RIDGE, width=1100, height=50, borderwidth=3)\nbutton_frame.pack(side=TOP)\nbutton_frame.pack_propagate(0)\n\nfolder_button = Button(button_frame, text=\" select dataset [Image Folder] \", command=open_folder, relief=RIDGE,\n bg=\"white\")\nfolder_button.pack(side=LEFT)\n\nimage_button = Button(button_frame, text=\" select image \", command=select_image, relief=RIDGE, bg=\"white\")\nimage_button.pack(side=LEFT)\n\nsearch_button = Button(button_frame, text=\" search \", command=search_similar, relief=RIDGE, bg=\"white\")\nsearch_button.pack(side=LEFT)\n\n# frame to display image meta data\nresult_frame = tk.Frame(app, relief=RIDGE, width=400, height=500, borderwidth=3, )\nresult_frame.pack(side=LEFT)\nresult_frame.pack_propagate(0)\n\ntext_output = Text(result_frame)\ntext_output.pack(expand=True, fill='both')\n\nimage_output = tk.Frame(app, relief=RIDGE, width=800, height=500, borderwidth=3)\nimage_output.pack(side=LEFT)\nimage_output.pack_propagate(0)\n\nimage_panel = Label(image_output)\nimage_panel.pack(side=TOP)\n\n# frame to display similar images\nalbum_frame = tk.Frame(image_output, relief=RIDGE, width=800, height=50, 
borderwidth=3)\nalbum_frame.pack(side=BOTTOM)\n\nback_button = Button(album_frame, text=\"<<\", command=back)\nback_button.pack(side=LEFT)\n\nnext_button = Button(album_frame, text=\">>\", command=next)\nnext_button.pack(side=RIGHT)\n\napp.mainloop()\n","repo_name":"siddhusalvi/CBIR-data","sub_path":"cbir.py","file_name":"cbir.py","file_ext":"py","file_size_in_byte":10076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71332724006","text":"import unittest\n\nfrom crawler.dicom import _is_start_or_end, _get_tag, _get_value, get_results\n\n\nclass DicomTest(unittest.TestCase):\n def test_start(self):\n line = 'W: ---------------------------'\n start = _is_start_or_end(line)\n self.assertEqual(True, start)\n\n def test_tag(self):\n line = 'W: (0008,0052) CS [STUDY ]'\n tag = _get_tag(line)\n self.assertEqual('QueryRetrieveLevel', tag)\n\n def test_value(self):\n line = 'W: (0020,000d) UI [9.2.841.113619.6.95.31.0.3.4.1.24.13.6109561]'\n tag = _get_value(line)\n self.assertEqual('9.2.841.113619.6.95.31.0.3.4.1.24.13.6109561', tag)\n\n\n def test_all(self):\n with open('tests/test_data2') as f:\n test_data = f.read().splitlines()\n r = get_results(test_data)\n self.assertEqual(1, len(r))\n print(len(r))","repo_name":"pacs-ris-crawler/pacs-ris-crawler","sub_path":"crawler/tests/test_dicom2.py","file_name":"test_dicom2.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"74074576165","text":"from DistMeasures import jaccard_distance\r\n\r\nclass ClusterNode(object):\r\n def __init__(self, samples = None, children1 = None, children2 = None):\r\n if children1 and children2:\r\n self.samples = children1.samples|children2.samples\r\n self.children = {children1, children2}\r\n elif samples is not None:\r\n self.samples = {samples}\r\n self.n_samples = len(self.samples)\r\n\r\nclass HierarchicalClustering(object):\r\n\r\n def __init__(self, samples = None, target = 1, linkage = 'avglkge', metric = jaccard_distance):\r\n\r\n self.samples = samples\r\n self.linkage = linkage\r\n self.metric = metric\r\n self.target = target\r\n\r\n self.distsort = None\r\n self.distMatrix = None\r\n self.clusters = [ClusterNode(i) for i in range(len(samples))]\r\n self.n_clusters = len(samples)\r\n\r\n self.__creat_distMatrix()\r\n self.__creat_distsort()\r\n\r\n def avglkge(self,n1,n2):\r\n TD = 0.0\r\n n = 0\r\n for i in n1.samples:\r\n for j in n2.samples:\r\n n += 1\r\n TD += self.distMatrix[i][j]\r\n return (TD / n)\r\n\r\n def __creat_distMatrix(self):\r\n if self.distMatrix is None:\r\n n_samples = len(self.samples)\r\n distMatrix = [[0]*n_samples for i in range(n_samples)]\r\n for i in range(n_samples-1):\r\n for j in range(i + 1, n_samples):\r\n dist = round(self.metric(self.samples[i], self.samples[j]),3)\r\n distMatrix[i][j], distMatrix[j][i] = dist, dist\r\n self.distMatrix = distMatrix\r\n\r\n def __creat_distsort(self):\r\n if self.distsort is None:\r\n n_samples = len(self.samples)\r\n distMatrix = self.distMatrix\r\n distlist = []\r\n for i in range(n_samples-1):\r\n for j in range(i + 1, n_samples):\r\n dist = distMatrix[i][j]\r\n distlist.append(({i},{j},dist))\r\n self.distsort = sorted(distlist, key=lambda d:d[2])\r\n\r\n def __merge_mix(self):\r\n cluster1, cluster2 = self.distsort[0][0], self.distsort[0][1]\r\n merge_node1,merge_node2 = None,None\r\n i = 0\r\n while merge_node1 is None or merge_node2 is None:\r\n\r\n if 
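The DICOM tests above expect the tag '(0008,0052)' to resolve to the keyword QueryRetrieveLevel. The crawler.dicom helpers themselves are not shown; one plausible way to do that lookup is via pydicom's data dictionary:

from pydicom.datadict import keyword_for_tag

print(keyword_for_tag(0x00080052))   # QueryRetrieveLevel
print(keyword_for_tag(0x0020000D))   # StudyInstanceUID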
self.clusters[i].samples == cluster1:\r\n merge_node1 = self.clusters[i]\r\n self.clusters.pop(i)\r\n elif self.clusters[i].samples == cluster2:\r\n merge_node2 = self.clusters[i]\r\n self.clusters.pop(i)\r\n else: i += 1\r\n\r\n new_CL = ClusterNode(children1=merge_node1, children2=merge_node2)\r\n self.clusters.append(new_CL)\r\n\r\n def __update_distsort(self):\r\n distsort = self.distsort\r\n new_CL = self.clusters[-1]\r\n del_id1, del_id2 = distsort[0][0], distsort[0][1]\r\n for i in range(len(distsort)-1,-1,-1):\r\n if del_id1 == distsort[i][0] or del_id2 == distsort[i][0] or del_id1 == distsort[i][1] or del_id2 == distsort[i][1]:\r\n distsort.pop(i)\r\n for i in self.clusters[:-1]:\r\n dist = self.avglkge(i,new_CL)\r\n new_sortitem = (i.samples,new_CL.samples,dist)\r\n distsort.append(new_sortitem)\r\n self.distsort = sorted(distsort, key=lambda d: d[2])\r\n\r\n\r\n\r\n def clustering(self):\r\n\r\n while self.n_clusters > self.target:\r\n self.__merge_mix()\r\n self.__update_distsort()\r\n self.n_clusters = len(self.clusters)\r\n for i in self.clusters:\r\n print(i.samples, end='')\r\n print()\r\ndef main():\r\n samples = [[1, 0, 1, 1, 0, 0, 0, 0, 1],\r\n [1, 0, 0, 1, 0, 1, 1, 0, 1],\r\n [0, 1, 1, 0, 0, 0, 1, 1, 0],\r\n [0, 1, 0, 0, 1, 1, 0, 1, 0],\r\n [1, 0, 0, 1, 0, 1, 1, 1, 0],\r\n [0, 1, 1, 0, 0, 0, 1, 0, 1],]\r\n fit = HierarchicalClustering(samples)\r\n fit.clustering()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"Cokeysama/HierarchicalClustering","sub_path":"HCA-AGNES.py","file_name":"HCA-AGNES.py","file_ext":"py","file_size_in_byte":4030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36648421406","text":"import discord\r\nimport random\r\n\r\nTOKEN = 'OTI5NzIwNDI4NTkzODE1NTcy.Ydrbnw.DK_hswlFkPQzijV0NYiOUAJRAzc'\r\n\r\nclient = discord.Client()\r\n\r\n@client.event\r\nasync def on_ready():\r\n print('We have logged in as {0.user}'.format(client))\r\n\r\n@client.event\r\nasync def on_message(message):\r\n username = str(message.author).split('#')[0]\r\n user_message = str(message.content)\r\n channel = str(message.channel.name)\r\n print(f'{username}: {user_message} ({channel})')\r\n\r\n if message.author == client.user:\r\n return\r\n if message.channel.name == 'general':\r\n if user_message.lower() == 'hello':\r\n await message.channel.send(f'Hello {username} !')\r\n return\r\n elif user_message.lower() == 'bye':\r\n await message.channel.send(f'Bye {username}, was really nice to meet you !')\r\n elif user_message.lower() == '!random':\r\n response = f'This is your random number: {random.randrange(1000000)}'\r\n await message.channel.send(response)\r\n elif user_message.lower() == '!help':\r\n await message.channel.send('Here are the following commands that you can use during this discord channel : \\n 1.hello \\n 2.bye \\n 3. !random \\n 4. 
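The AGNES clusterer above consumes jaccard_distance over binary attribute vectors; the metric lives in the unshown DistMeasures module, so this definition is an assumption consistent with how it is called:

def jaccard_distance(a, b):
    # 1 - |intersection| / |union| over the positions set to 1 (assumed form).
    both = sum(1 for x, y in zip(a, b) if x == 1 and y == 1)
    either = sum(1 for x, y in zip(a, b) if x == 1 or y == 1)
    return 1 - both / either if either else 0.0

print(jaccard_distance([1, 0, 1, 1], [1, 0, 0, 1]))   # 1 - 2/3 = 0.333...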
!anywhere (Activate globally)')\r\n return\r\n\r\n if user_message.lower() == '!anywhere':\r\n await message.channel.send('This can be anywhere!')\r\n return\r\n\r\nclient.run(TOKEN)\r\n\r\n\r\n\r\n\r\n","repo_name":"TalMogendorff/Python_Projects","sub_path":"tropital_chatbot_discord/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"73919269911","text":"def route(list):\n realRoute = []\n lenList = len(list)\n lenItem0 = len(list[0])\n for i in range(lenList):\n for j in range(lenItem0):\n if list[i][j] not in realRoute:\n realRoute.append(list[i][j])\n for i in realRoute:\n print(i + \"-\", end='')\n\ncities = [\n[\"Sarajevo\", \"Mostar\"],\n[\"Tuzla\", \"Zenica\"],\n[\"Mostar\", \"Konjic\"],\n[\"Zenica\", \"Banja Luka\"],\n[\"Konjic\", \"Tuzla\"],\n[\"Zavidovici\", \"Sarajevo\"]\n]\n\nroute(cities)\n","repo_name":"AbdurrahmanSp/CityRoutes","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"26779335876","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom __future__ import unicode_literals\r\n\r\nfrom .simple_requests.api import Request\r\n\r\n\r\nclass Client:\r\n\r\n\r\n def __init__(self, plugin, credential):\r\n self.plugin = plugin\r\n self.credential = credential\r\n\r\n self.DEVICE_ID = self.plugin.get_setting('device_id')\r\n self.TOKEN = self.plugin.get_setting('token')\r\n self.COUNTRY = self.plugin.get_setting('country')\r\n self.LANGUAGE = self.plugin.get_setting('language')\r\n self.PORTABILITY = self.plugin.get_setting('portability')\r\n self.MAX_REGISTRABLE_DEVICES = self.plugin.get_setting('max_registrable_devices')\r\n self.ENTITLEMENTS = self.plugin.get_setting('entitlements').split(',')\r\n self.POST_DATA = {}\r\n self.ERRORS = 0\r\n\r\n self.HEADERS = {\r\n 'Content-Type': 'application/json',\r\n 'Referer': self.plugin.api_base\r\n }\r\n\r\n self.PARAMS = {}\r\n\r\n self.STARTUP = 'https://startup.core.indazn.com/misl/v5/Startup'\r\n self.RAIL = self.plugin.get_setting('api_endpoint_rail')\r\n self.RAILS = self.plugin.get_setting('api_endpoint_rails')\r\n self.EPG = self.plugin.get_setting('api_endpoint_epg')\r\n self.EVENT = self.plugin.get_setting('api_endpoint_event')\r\n self.PLAYBACK = self.plugin.get_setting('api_endpoint_playback')\r\n self.SIGNIN = self.plugin.get_setting('api_endpoint_signin')\r\n self.SIGNOUT = self.plugin.get_setting('api_endpoint_signout')\r\n self.REFRESH = self.plugin.get_setting('api_endpoint_refresh_access_token')\r\n self.PROFILE = self.plugin.get_setting('api_endpoint_userprofile')\r\n self.RESOURCES = self.plugin.get_setting('api_endpoint_resource_strings')\r\n self.DEVICES = self.plugin.get_setting('api_endpoint_devices')\r\n\r\n\r\n def content_data(self, url):\r\n data = self.request(url)\r\n if data.get('odata.error', None):\r\n self.errorHandler(data)\r\n return data\r\n\r\n\r\n def rails(self, id_, params=''):\r\n self.PARAMS = {}\r\n self.PARAMS['country'] = self.COUNTRY\r\n self.PARAMS['groupId'] = id_\r\n self.PARAMS['params'] = params\r\n content_data = self.content_data(self.RAILS)\r\n for rail in content_data.get('Rails', []):\r\n id_ = rail.get('Id')\r\n resource = self.plugin.get_resource(id_, prefix='browseui_railHeader')\r\n title = resource.get('text')\r\n if resource.get('found') == False:\r\n rail_data = self.railFromCache(id_, rail.get('Params', 
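route() above deduplicates the flattened city pairs in first-seen order (and prints a trailing dash). dict.fromkeys gives the same ordered dedup in one pass and makes the separator handling trivial:

def route_compact(pairs):
    # dicts preserve insertion order in Python 3.7+, so this acts as an ordered set.
    ordered = dict.fromkeys(city for pair in pairs for city in pair)
    print('-'.join(ordered))

route_compact([["Sarajevo", "Mostar"], ["Tuzla", "Zenica"], ["Mostar", "Konjic"]])
# Sarajevo-Mostar-Tuzla-Zenica-Konjic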
params))\r\n title = rail_data.get('Title', rail.get('Id')) if isinstance(rail_data, dict) else rail.get('Id')\r\n else:\r\n title = resource.get('text')\r\n rail['Title'] = title\r\n return content_data\r\n\r\n\r\n def railFromCache(self, id_, params=''):\r\n return self.plugin.railCache.cacheFunction(self.rail, id_, params)\r\n\r\n\r\n def rail(self, id_, params=''):\r\n self.PARAMS = {}\r\n self.PARAMS['languageCode'] = self.LANGUAGE\r\n self.PARAMS['country'] = self.COUNTRY\r\n self.PARAMS['id'] = id_\r\n self.PARAMS['params'] = params\r\n return self.content_data(self.RAIL)\r\n\r\n\r\n def epg(self, params):\r\n self.PARAMS = {}\r\n self.PARAMS['languageCode'] = self.LANGUAGE\r\n self.PARAMS['country'] = self.COUNTRY\r\n self.PARAMS['date'] = params\r\n return self.content_data(self.EPG)\r\n\r\n\r\n def event(self, id_):\r\n self.PARAMS = {}\r\n self.PARAMS['languageCode'] = self.LANGUAGE\r\n self.PARAMS['country'] = self.COUNTRY\r\n self.PARAMS['id'] = id_\r\n return self.content_data(self.EVENT)\r\n\r\n\r\n def resources(self):\r\n self.PARAMS = {}\r\n self.PARAMS['languageCode'] = self.LANGUAGE\r\n self.PARAMS['region'] = self.COUNTRY\r\n self.PARAMS['platform'] = 'web'\r\n self.plugin.cache(self.RESOURCES, self.content_data(self.RESOURCES))\r\n\r\n\r\n def playback_data(self, id_):\r\n self.HEADERS['authorization'] = 'Bearer ' + self.TOKEN\r\n self.HEADERS['x-dazn-device'] = self.DEVICE_ID\r\n self.PARAMS = {}\r\n self.PARAMS['AssetId'] = id_\r\n self.PARAMS['LanguageCode'] = self.LANGUAGE\r\n self.PARAMS['Platform'] = 'web'\r\n self.PARAMS['Format'] = 'MPEG-DASH'\r\n self.PARAMS['PlayerId'] = 'DAZN-' + self.DEVICE_ID\r\n self.PARAMS['Model'] = 'unknown'\r\n self.PARAMS['Secure'] = 'true'\r\n self.PARAMS['Manufacturer'] = 'unknown'\r\n self.PARAMS['PlayReadyInitiator'] = 'false'\r\n self.PARAMS['MtaLanguageCode'] = self.LANGUAGE\r\n self.PARAMS['AppVersion'] = '9.26.0'\r\n res = self.request(self.PLAYBACK)\r\n self.plugin.log('playback: {0}'.format(res))\r\n return res\r\n\r\n\r\n def playback(self, id_, pin):\r\n if self.plugin.validate_pin(pin):\r\n self.HEADERS['x-age-verification-pin'] = pin\r\n data = self.playback_data(id_)\r\n if data.get('odata.error', None):\r\n self.errorHandler(data)\r\n if self.TOKEN:\r\n data = self.playback_data(id_)\r\n return data\r\n\r\n\r\n def userProfile(self):\r\n self.HEADERS['authorization'] = 'Bearer ' + self.TOKEN\r\n data = self.request(self.PROFILE)\r\n if data.get('odata.error', None):\r\n self.errorHandler(data)\r\n else:\r\n if 'PortabilityAvailable' in self.PORTABILITY:\r\n self.COUNTRY = self.plugin.portability_country(self.COUNTRY, data['UserCountryCode'])\r\n if not self.LANGUAGE.lower() == data['UserLanguageLocaleKey'].lower():\r\n self.LANGUAGE = data['UserLanguageLocaleKey']\r\n self.setLanguage(data['SupportedLanguages'])\r\n self.plugin.set_setting('viewer_id', data['ViewerId'])\r\n self.plugin.set_setting('language', self.LANGUAGE)\r\n self.plugin.set_setting('country', self.COUNTRY)\r\n self.plugin.set_setting('portability', self.PORTABILITY)\r\n\r\n\r\n def setLanguage(self, languages):\r\n self.LANGUAGE = self.plugin.language(self.LANGUAGE, languages)\r\n self.resources()\r\n\r\n\r\n def setToken(self, auth, result):\r\n self.plugin.log('[{0}] signin: {1}'.format(self.plugin.addon_id, result))\r\n if auth and result in ['SignedIn', 'SignedInInactive']:\r\n self.TOKEN = auth['Token']\r\n self.MAX_REGISTRABLE_DEVICES = self.plugin.get_max_registrable_devices(self.TOKEN)\r\n self.ENTITLEMENTS = 
self.plugin.get_entitlements(self.TOKEN)\r\n else:\r\n if result in ['HardOffer', 'SignedInPaused']:\r\n self.plugin.dialog_ok(self.plugin.get_resource('error_10101').get('text'))\r\n self.signOut()\r\n self.plugin.set_setting('token', self.TOKEN)\r\n self.plugin.set_setting('max_registrable_devices', '{}'.format(self.MAX_REGISTRABLE_DEVICES))\r\n self.plugin.set_setting('entitlements', ','.join(self.ENTITLEMENTS))\r\n\r\n\r\n def signIn(self):\r\n credentials = self.credential.get_credentials()\r\n if credentials:\r\n self.HEADERS['user-agent'] = '{}'.format(self.plugin.get_user_agent())\r\n self.HEADERS['x-dazn-ua'] = '{} {}'.format(self.plugin.get_user_agent(), 'signin/4.26.1.34 hyper/0.8.4 (web; production; de)')\r\n self.POST_DATA = {\r\n 'Email': credentials['email'],\r\n 'Password': credentials['password'],\r\n 'DeviceId': self.DEVICE_ID,\r\n 'Platform': 'web'\r\n }\r\n data = self.request(self.SIGNIN)\r\n if data.get('odata.error', None):\r\n self.errorHandler(data)\r\n else:\r\n self.setToken(data['AuthToken'], data.get('Result', 'SignInError'))\r\n if self.plugin.get_setting('save_login') == 'true' and self.plugin.get_setting('token'):\r\n self.credential.set_credentials(credentials['email'], credentials['password'])\r\n else:\r\n self.plugin.dialog_ok(self.plugin.get_resource('signin_tvNoSignUpPerex').get('text'))\r\n\r\n\r\n def signOut(self):\r\n if self.TOKEN:\r\n self.HEADERS['authorization'] = 'Bearer ' + self.TOKEN\r\n self.POST_DATA = {\r\n 'DeviceId': self.DEVICE_ID\r\n }\r\n r = self.request(self.SIGNOUT)\r\n self.TOKEN = ''\r\n self.plugin.set_setting('token', self.TOKEN)\r\n self.plugin.set_setting('device_id', '')\r\n\r\n\r\n def refreshToken(self):\r\n self.HEADERS['authorization'] = 'Bearer ' + self.TOKEN\r\n self.HEADERS['user-agent'] = '{}'.format(self.plugin.get_user_agent())\r\n self.HEADERS['x-dazn-ua'] = '{} {}'.format(self.plugin.get_user_agent(), 'signin/4.26.1.34 hyper/0.8.4 (web; production; de)')\r\n self.POST_DATA = {\r\n 'DeviceId': self.DEVICE_ID\r\n }\r\n data = self.request(self.REFRESH)\r\n if data.get('odata.error', None):\r\n self.signOut()\r\n self.errorHandler(data)\r\n else:\r\n self.setToken(data['AuthToken'], data.get('Result', 'RefreshAccessTokenError'))\r\n\r\n\r\n def playableDevices(self):\r\n self.HEADERS['authorization'] = 'Bearer ' + self.TOKEN\r\n data = self.request(self.DEVICES)\r\n if data.get('odata.error', None):\r\n self.errorHandler(data)\r\n return None\r\n else:\r\n playableDevices = 0\r\n for device in data.get('devices'):\r\n if device.get('playable'):\r\n playableDevices += 1\r\n\r\n return playableDevices\r\n\r\n\r\n def initStartupData(self):\r\n self.POST_DATA = {\r\n 'LandingPageKey': 'generic',\r\n 'Languages': '{0}, {1}'.format(self.plugin.gui_language(), self.LANGUAGE),\r\n 'Platform': 'web',\r\n 'Manufacturer': '',\r\n 'PromoCode': ''\r\n }\r\n return self.request(self.STARTUP)\r\n\r\n\r\n def initApiEndpoints(self, endpoints_dict):\r\n self.RAIL = endpoints_dict.get('api_endpoint_rail')\r\n self.RAILS = endpoints_dict.get('api_endpoint_rails')\r\n self.EPG = endpoints_dict.get('api_endpoint_epg')\r\n self.EVENT = endpoints_dict.get('api_endpoint_event')\r\n self.PLAYBACK = self.plugin.get_setting('api_endpoint_playback')\r\n self.SIGNIN = endpoints_dict.get('api_endpoint_signin')\r\n self.SIGNOUT = endpoints_dict.get('api_endpoint_signout')\r\n self.REFRESH = endpoints_dict.get('api_endpoint_refresh_access_token')\r\n self.PROFILE = endpoints_dict.get('api_endpoint_userprofile')\r\n self.RESOURCES = 
endpoints_dict.get('api_endpoint_resource_strings')\r\n self.DEVICES = endpoints_dict.get('api_endpoint_devices')\r\n\r\n\r\n def initRegion(self, startup_data):\r\n region = startup_data.get('Region', {})\r\n if region:\r\n self.PORTABILITY = region['CountryPortabilityStatus']\r\n self.COUNTRY = region['Country']\r\n self.LANGUAGE = region['Language']\r\n self.setLanguage(startup_data['SupportedLanguages'])\r\n\r\n return region\r\n\r\n\r\n def startUp(self, region):\r\n if region.get('isAllowed', False):\r\n if self.TOKEN:\r\n self.refreshToken()\r\n else:\r\n self.signIn()\r\n else:\r\n self.TOKEN = ''\r\n self.plugin.log('[{0}] version: {1} region: {2}'.format(self.plugin.addon_id, self.plugin.addon_version, region))\r\n self.plugin.dialog_ok(self.plugin.get_resource('error_2003_notAvailableInCountry').get('text'))\r\n\r\n\r\n def request(self, url):\r\n requests = Request(self.plugin)\r\n if self.POST_DATA:\r\n r = requests.post(url, headers=self.HEADERS, data=self.POST_DATA, params=self.PARAMS)\r\n self.POST_DATA = {}\r\n else:\r\n r = requests.get(url, headers=self.HEADERS, params=self.PARAMS)\r\n\r\n if r.text and self.plugin.get_dict_value(r.headers, 'content-type').startswith('application/json'):\r\n return r.json()\r\n else:\r\n if not r.status_code == 204:\r\n self.plugin.log('[{0}] error: {1} ({2}, {3})'.format(self.plugin.addon_id, url, str(r.status_code), self.plugin.get_dict_value(r.headers, 'content-type')))\r\n if r.status_code == -1:\r\n self.plugin.log('[{0}] error: {1}'.format(self.plugin.addon_id, r.text))\r\n return {}\r\n\r\n\r\n def errorHandler(self, data):\r\n self.ERRORS += 1\r\n msg = data['odata.error']['message']['value']\r\n code = str(data['odata.error']['code'])\r\n self.plugin.log('[{0}] version: {1} country: {2} language: {3} portability: {4}'.format(self.plugin.addon_id, self.plugin.addon_version, self.COUNTRY, self.LANGUAGE, self.PORTABILITY))\r\n self.plugin.log('[{0}] error: {1} ({2})'.format(self.plugin.addon_id, msg, code))\r\n\r\n error_codes = ['10006', '10008', '10450']\r\n pin_codes = ['10155', '10161', '10163']\r\n\r\n if code == '10000' and self.ERRORS < 3:\r\n self.refreshToken()\r\n elif (code == '401' or code == '10033') and self.ERRORS < 3:\r\n self.signIn()\r\n elif code == '3001':\r\n self.startUp()\r\n elif code == '10049':\r\n self.plugin.dialog_ok(self.plugin.get_resource('signin_errormessage').get('text'))\r\n elif code == '10450' and self.ERRORS < 3:\r\n if self.playableDevices() >= int(self.MAX_REGISTRABLE_DEVICES):\r\n self.plugin.dialog_ok(self.plugin.get_resource('error2_65_450_403_header').get('text'))\r\n else:\r\n self.refreshToken()\r\n elif code == '10801':\r\n self.plugin.dialog_ok(self.plugin.get_resource('error2_65_801_403_header').get('text'))\r\n elif code in error_codes:\r\n self.plugin.dialog_ok(self.plugin.get_resource('error_{0}'.format(code)).get('text'))\r\n elif code in pin_codes:\r\n self.TOKEN = ''\r\n self.plugin.dialog_ok(self.plugin.get_resource('error_{0}'.format(code)).get('text'))\r\n","repo_name":"Maven85/plugin.video.dazn","sub_path":"resources/lib/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":14107,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"5"}
+{"seq_id":"21129289380","text":"lower_value = 1\nupper_value = int(input (\"Zadejte nejvyšší hodnotu čísla: \")) \nfor number in range (lower_value, upper_value + 1): \n if number > 1: \n for i in range (2, number): \n if (number % i) == 0: \n break \n else: \n print (number) \ninput(\"Stiskni klávesu ENTER pro ukončení programu\")","repo_name":"N4N001/python","sub_path":"prvocisla.py","file_name":"prvocisla.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"25922527842","text":"from http.server import HTTPServer, BaseHTTPRequestHandler\nimport os\n\ncmd = \"curl http://metadata.google.internal/computeMetadata/v1/instance/%s -H Metadata-Flavor:Google 2>&1 -s -S\"\n\nclass MyHandler(BaseHTTPRequestHandler):\n def do_GET(self):\n try:\n id = os.popen(cmd % 'id').read()\n hostname = os.popen(cmd % 'hostname').read()\n except:\n id = \"undefined\"\n hostname = \"undefined\"\n\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n response = \"Hello from %s %s\" % (id, hostname)\n self.wfile.write(response.encode(\"utf-8\"))\n\n\nprint(\"starting\")\n\ntry:\n server = HTTPServer(('', 8080), MyHandler)\n server.serve_forever()\nexcept KeyboardInterrupt:\n print('^C received, shutting down server')\n server.socket.close()\n","repo_name":"mkulak/swali","sub_path":"pulumi/app/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"}
+{"seq_id":"26636534543","text":"import sqlite3\nfrom .db import DB\n\n\nclass CumulativeSummary(DB):\n\n def __init__(self):\n super().__init__()\n\n self.table_name = 'KL_cumulative_summary'\n self.table_desc = 'Cumulative summary'\n self.cols = self.getcolumns()\n\n def getcolumns(self):\n cols = {\n 'date': 'DATE NOT NULL PRIMARY KEY',\n 'total_positive_cases': 'INT',\n 'active_cases': 'INT',\n 'total_recovered': 'INT',\n 'total_persons_in_surveillance': 'INT',\n 'total_persons_in_home_ins_isolation': 'INT',\n 'total_persons_in_hospital_isolation': 'INT',\n 'total_deaths': 'INT',\n 'total_deaths_declared_as_per_appeal': 'INT',\n 'total_pending_deaths': 'INT'\n }\n return cols\n","repo_name":"IBM/covid19-india-data","sub_path":"data_extractor/db/KL_tables/KL_cumulative_summary.py","file_name":"KL_cumulative_summary.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"5"}
+{"seq_id":"18661580311","text":"import os\nfrom PIL import Image\n\ndef convert_to_jpg(infile):\n f, e = os.path.splitext(infile)\n outfile = f + \".jpg\"\n if infile != outfile:\n try:\n x = Image.open(infile).save(outfile)\n except IOError:\n \tpass\n #print(\"cannot convert\", infile)\n return outfile\n\n\ndef main():\n dirname = 'tiles' # a directory of .gif files\n pictdb = os.listdir(dirname)\n L = []\n for item in pictdb:\n im = (convert_to_jpg(item))\n L.append(im)\n\n print (L)\t\t\n return L\n\nif __name__ == '__main__':\n\tmain()","repo_name":"gregpuzzles1/Photomosaic","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"16960628748","text":"import asyncio\nimport json\nimport logging\nimport os\nimport ssl\nimport sys\nimport tempfile\nimport uuid\nimport warnings\nfrom collections.abc import Iterator\nfrom contextlib import AsyncExitStack\nfrom dataclasses import dataclass\nfrom datetime import datetime, timedelta\nfrom email.utils import parseaddr\nfrom io import BytesIO\nfrom typing import Any, Optional, TypedDict, cast\n\nimport aiojobs\nimport aiozipkin\nimport 
asyncpg\nimport asyncpg.pool\nimport uvloop\nfrom aiohttp import (\n ClientConnectorError,\n ClientOSError,\n ClientResponseError,\n ClientSession,\n ClientTimeout,\n MultipartReader,\n ServerDisconnectedError,\n web,\n)\nfrom aiohttp_openmetrics import Counter, Gauge, Histogram, metrics, metrics_middleware\nfrom breezy import debug, urlutils\nfrom breezy.branch import Branch\nfrom breezy.errors import PermissionDenied, UnexpectedHttpStatus\n\ntry:\n from breezy.errors import ConnectionError # type: ignore\nexcept ImportError: # breezy >= 4\n pass\nfrom breezy.transport import Transport, UnsupportedProtocol, UnusableRedirect\nfrom redis.asyncio import Redis\nfrom silver_platter.probers import select_preferred_probers\nfrom silver_platter.proposal import (\n Forge,\n ForgeLoginRequired,\n NoSuchProject,\n UnsupportedForge,\n find_existing_proposed,\n get_forge,\n)\nfrom silver_platter.utils import BranchRateLimited, full_branch_url\nfrom yarl import URL\n\nfrom . import set_user_agent, splitout_env, state\nfrom ._launchpad import override_launchpad_consumer_name\nfrom .artifacts import (\n ArtifactManager,\n LocalArtifactManager,\n get_artifact_manager,\n store_artifacts_with_backup,\n upload_backup_artifacts,\n)\nfrom .config import Campaign, get_campaign_config, get_distribution, read_config\nfrom .debian import dpkg_vendor\nfrom .logs import FileSystemLogFileManager, LogFileManager, get_log_manager, import_logs\nfrom .queue import Queue, QueueItem\nfrom .schedule import (\n CandidateUnavailable,\n do_schedule,\n do_schedule_control,\n do_schedule_regular,\n)\nfrom .vcs import (\n BranchOpenFailure,\n UnsupportedVcs,\n VcsManager,\n get_vcs_abbreviation,\n get_vcs_managers,\n is_authenticated_url,\n open_branch_ext,\n)\nfrom .worker_creds import check_worker_creds\n\noverride_launchpad_consumer_name()\n\n\nDEFAULT_RETRY_AFTER = 120\nREMOTE_BRANCH_OPEN_TIMEOUT = 10.0\nVCS_STORE_BRANCH_OPEN_TIMEOUT = 5.0\n# Maybe this should be configurable somewhere?\nDEFAULT_VCS_TYPE = 'git'\n\n\nroutes = web.RouteTableDef()\nrun_count = Counter(\"run_count\", \"Number of runs executed.\")\nlast_success_gauge = Gauge(\n \"job_last_success_unixtime\", \"Last time a batch job successfully finished\"\n)\nbuild_duration = Histogram(\"build_duration\", \"Build duration\", [\"campaign\"])\nrun_result_count = Counter(\"result\", \"Result counts\", [\"campaign\", \"result_code\"])\nactive_run_count = Gauge(\"active_runs\", \"Number of active runs\", [\"worker\"])\nassignment_count = Counter(\"assignments\", \"Number of assignments handed out\", [\"worker\"])\nrate_limited_count = Counter(\"rate_limited_host\", \"Rate limiting per host\", [\"host\"])\nartifact_upload_failed_count = Counter(\n \"artifact_upload_failed\", \"Number of failed artifact uploads\")\nqueue_empty_count = Counter(\n \"queue_empty\",\n \"Number of times the queue was empty when an assignment was requested\")\n\n\nasync def to_thread_timeout(timeout, func, *args, **kwargs):\n cor = asyncio.to_thread(func, *args, **kwargs)\n if timeout is not None:\n cor = asyncio.wait_for(cor, timeout=timeout)\n return await cor\n\n\nclass BuilderResult:\n\n kind: str\n\n def from_directory(self, path):\n raise NotImplementedError(self.from_directory)\n\n async def store(self, conn, run_id):\n raise NotImplementedError(self.store)\n\n def json(self):\n raise NotImplementedError(self.json)\n\n def artifact_filenames(self):\n raise NotImplementedError(self.artifact_filenames)\n\n @classmethod\n def from_json(cls, json):\n raise 
NotImplementedError(cls.from_json)\n\n\nclass Builder:\n \"\"\"Abstract builder class.\"\"\"\n\n kind: str\n\n result_cls: type[BuilderResult] = BuilderResult\n\n async def config(\n self, conn: asyncpg.Connection,\n campaign_config: Campaign, queue_item: QueueItem) -> dict[str, str]:\n raise NotImplementedError(self.config)\n\n async def build_env(\n self, conn: asyncpg.Connection,\n campaign_config: Campaign, queue_item: QueueItem) -> dict[str, str]:\n raise NotImplementedError(self.build_env)\n\n def additional_colocated_branches(self, main_branch):\n raise NotImplementedError(self.additional_colocated_branches)\n\n\nclass GenericResult(BuilderResult):\n \"\"\"Generic build result.\"\"\"\n\n kind = \"generic\"\n\n @classmethod\n def from_json(cls, target_details):\n return cls()\n\n def from_directory(self, path):\n pass\n\n def json(self):\n return {}\n\n def artifact_filenames(self):\n return []\n\n async def store(self, conn, run_id):\n pass\n\n\nclass GenericBuilder(Builder):\n \"\"\"Generic builder.\"\"\"\n\n kind = \"generic\"\n\n result_cls = GenericResult\n\n def __init__(self, dep_server_url) -> None:\n self.dep_server_url = dep_server_url\n\n async def config(self, conn, campaign_config, queue_item):\n config = {}\n if campaign_config.generic_build.chroot:\n config[\"chroot\"] = campaign_config.generic_build.chroot\n config[\"dep_server_url\"] = self.dep_server_url\n return config\n\n async def build_env(self, conn, campaign_config, queue_item):\n return {}\n\n def additional_colocated_branches(self, main_branch):\n return {}\n\n\nclass DebianResult(BuilderResult):\n\n kind = \"debian\"\n\n def __init__(\n self, source=None, build_version=None, build_distribution=None,\n changes_filenames=None, lintian_result=None, binary_packages=None\n ) -> None:\n self.source = source\n self.build_version = build_version\n self.build_distribution = build_distribution\n self.binary_packages = binary_packages\n self.changes_filenames = changes_filenames\n self.lintian_result = lintian_result\n\n def from_directory(self, path):\n from .debian import NoChangesFile, find_changes\n try:\n self.output_directory = path\n (\n self.changes_filenames,\n self.source,\n self.build_version,\n self.build_distribution,\n self.binary_packages\n ) = find_changes(path)\n except NoChangesFile as e:\n # Oh, well.\n logging.info(\"No changes file found: %s\", e)\n else:\n logging.info(\n \"Found changes files %r, source %s, build version %s, \"\n \"distribution: %s, binary packages: %r\",\n self.source, self.changes_filenames, self.build_version,\n self.build_distribution, self.binary_packages)\n\n def artifact_filenames(self):\n if not self.changes_filenames:\n return []\n from .debian import changes_filenames\n ret = []\n for changes_filename in self.changes_filenames:\n changes_path = os.path.join(self.output_directory, changes_filename)\n ret.extend(changes_filenames(changes_path))\n ret.append(changes_filename)\n return ret\n\n @classmethod\n def from_json(cls, target_details):\n return cls(lintian_result=target_details.get('lintian'))\n\n async def store(self, conn, run_id):\n if self.build_version:\n await conn.execute(\n \"INSERT INTO debian_build (run_id, source, version, distribution, lintian_result, binary_packages) \"\n \"VALUES ($1, $2, $3, $4, $5, $6)\",\n run_id,\n self.source,\n self.build_version,\n self.build_distribution,\n self.lintian_result,\n self.binary_packages\n )\n\n def json(self):\n return {\n \"build_distribution\": self.build_distribution,\n \"build_version\": self.build_version,\n 
\"changes_filenames\": self.changes_filenames,\n \"lintian\": self.lintian_result,\n \"binary_packages\": self.binary_packages,\n }\n\n def __bool__(self) -> bool:\n return self.changes_filenames is not None\n\n\nclass DebianBuilder(Builder):\n\n kind = \"debian\"\n\n result_cls = DebianResult\n\n def __init__(self, distro_config, apt_location: Optional[str] = None,\n dep_server_url: Optional[str] = None) -> None:\n self.distro_config = distro_config\n self.apt_location = apt_location\n self.dep_server_url = dep_server_url\n\n async def config(self, conn, campaign_config, queue_item):\n config: dict[str, Any] = {}\n config['lintian'] = {'profile': self.distro_config.lintian_profile}\n if self.distro_config.lintian_suppress_tag:\n config['lintian']['suppress-tags'] = list(self.distro_config.lintian_suppress_tag)\n\n extra_janitor_distributions = list(campaign_config.debian_build.extra_build_distribution)\n if queue_item.change_set:\n extra_janitor_distributions.append('cs/%s' % queue_item.change_set)\n\n # TODO(jelmer): Ship build-extra-repositories-keys, and specify [signed-by] here\n config['build-extra-repositories'] = []\n if self.apt_location:\n config['build-extra-repositories'].extend([\n f\"deb [trusted=yes] {self.apt_location} {suite} main\"\n for suite in extra_janitor_distributions\n ])\n\n config[\"build-distribution\"] = campaign_config.debian_build.build_distribution or campaign_config.name\n\n config[\"build-suffix\"] = campaign_config.debian_build.build_suffix or \"\"\n\n if campaign_config.debian_build.build_command:\n config[\"build-command\"] = campaign_config.debian_build.build_command\n elif self.distro_config.build_command:\n config[\"build-command\"] = self.distro_config.build_command\n\n last_build_version = await conn.fetchval(\n \"SELECT MAX(debian_build.version) FROM run \"\n \"LEFT JOIN debian_build ON debian_build.run_id = run.id \"\n \"WHERE debian_build.version IS NOT NULL AND run.codebase = $1 AND \"\n \"debian_build.distribution = $2\",\n queue_item.codebase, config['build-distribution']\n )\n\n if last_build_version:\n config[\"last-build-version\"] = str(last_build_version)\n\n if campaign_config.debian_build.chroot:\n config[\"chroot\"] = campaign_config.debian_build.chroot\n elif self.distro_config.chroot:\n config[\"chroot\"] = self.distro_config.chroot\n\n if self.distro_config.archive_mirror_uri and self.distro_config.component:\n config[\"base-apt-repository\"] = \"{} {} {}\".format(\n self.distro_config.archive_mirror_uri,\n self.distro_config.name,\n \" \".join(self.distro_config.component),\n )\n config[\"base-apt-repository-signed-by\"] = self.distro_config.signed_by or None\n config[\"dep_server_url\"] = self.dep_server_url\n\n return config\n\n async def build_env(self, conn, campaign_config, queue_item):\n env = {}\n\n if self.distro_config.name:\n env[\"DISTRIBUTION\"] = self.distro_config.name\n\n env['DEB_VENDOR'] = self.distro_config.vendor or dpkg_vendor()\n\n if campaign_config.debian_build.chroot:\n env[\"CHROOT\"] = campaign_config.debian_build.chroot\n elif self.distro_config.chroot:\n env[\"CHROOT\"] = self.distro_config.chroot\n\n if self.distro_config.archive_mirror_uri and self.distro_config.component:\n env[\"APT_REPOSITORY\"] = \"{} {} {}\".format(\n self.distro_config.archive_mirror_uri,\n self.distro_config.name,\n \" \".join(self.distro_config.component),\n )\n # TODO(jelmer): Set env[\"APT_REPOSITORY_KEY\"]\n\n return env\n\n def additional_colocated_branches(self, main_branch):\n from silver_platter.debian import 
pick_additional_colocated_branches\n return pick_additional_colocated_branches(main_branch)\n\n\nBUILDER_CLASSES: list[type[Builder]] = [DebianBuilder, GenericBuilder]\nRESULT_CLASSES = [builder_cls.result_cls for builder_cls in BUILDER_CLASSES]\n\n\ndef get_builder(config, campaign_config, apt_archive_url=None, dep_server_url=None):\n if campaign_config.HasField('debian_build'):\n try:\n distribution = get_distribution(\n config, campaign_config.debian_build.base_distribution)\n except KeyError as e:\n raise NotImplementedError(\n \"Unsupported distribution: \"\n f\"{campaign_config.debian_build.base_distribution}\") from e\n return DebianBuilder(\n distribution,\n apt_archive_url,\n dep_server_url,\n )\n elif campaign_config.HasField('generic_build'):\n return GenericBuilder(dep_server_url)\n else:\n raise NotImplementedError('no supported build type')\n\n\nclass JanitorResult:\n\n log_id: str\n branch_url: str\n subpath: Optional[str]\n code: str\n transient: Optional[bool]\n codebase: str\n\n def __init__(\n self,\n *,\n codebase: str,\n log_id: str,\n branch_url: str,\n code: str,\n description: Optional[str] = None,\n worker_result=None,\n logfilenames=None,\n campaign=None,\n start_time=None,\n finish_time=None,\n worker_name=None,\n vcs_type: Optional[str] = None,\n subpath: Optional[str] = None,\n resume_from: Optional[str] = None,\n change_set: Optional[str] = None,\n transient=None,\n ) -> None:\n self.campaign = campaign\n self.log_id = log_id\n self.description = description\n self.branch_url = branch_url\n self.code = code\n self.logfilenames = logfilenames or []\n self.worker_name = worker_name\n self.vcs_type = vcs_type\n self.subpath = subpath\n self.change_set = change_set\n if worker_result is not None:\n self.context = worker_result.context\n self.code = worker_result.code or code\n if self.description is None:\n self.description = worker_result.description\n self.main_branch_revision = worker_result.main_branch_revision\n self.codemod_result = worker_result.codemod\n self.revision = worker_result.revision\n self.value = worker_result.value\n self.builder_result = worker_result.builder_result\n self.branches = worker_result.branches\n self.tags = worker_result.tags\n self.remotes = worker_result.remotes\n self.failure_details = worker_result.details\n self.failure_stage = worker_result.stage\n self.start_time = worker_result.start_time\n self.finish_time = worker_result.finish_time\n if worker_result.refreshed:\n self.resume_from = None\n else:\n self.resume_from = resume_from\n self.target_branch_url = worker_result.target_branch_url\n self.branch_url = worker_result.branch_url\n self.vcs_type = worker_result.vcs_type\n self.subpath = worker_result.subpath\n self.transient = worker_result.transient\n self.codebase = worker_result.codebase or codebase\n else:\n self.start_time = start_time\n self.finish_time = finish_time\n self.context = None\n self.main_branch_revision = None\n self.revision = None\n self.codemod_result = None\n self.value = None\n self.builder_result = None\n self.branches = None\n self.tags = None\n self.failure_details = None\n self.failure_stage = None\n self.target_branch_url = None\n self.remotes = {}\n self.resume_from = None\n self.transient = transient\n self.codebase = codebase\n\n @property\n def duration(self):\n return self.finish_time - self.start_time\n\n def json(self):\n return {\n \"codebase\": self.codebase,\n \"campaign\": self.campaign,\n \"change_set\": self.change_set,\n \"log_id\": self.log_id,\n \"description\": 
self.description,\n \"code\": self.code,\n \"failure_details\": self.failure_details,\n \"failure_stage\": self.failure_stage,\n \"duration\": self.duration.total_seconds(),\n \"finish_time\": self.finish_time.isoformat(),\n \"start_time\": self.start_time.isoformat(),\n \"transient\": self.transient,\n \"target\": ({\n \"name\": self.builder_result.kind,\n \"details\": self.builder_result.json(),\n } if self.builder_result else {}),\n \"logfilenames\": self.logfilenames,\n \"codemod\": self.codemod_result,\n \"value\": self.value,\n \"remotes\": self.remotes,\n \"branch_url\": self.branch_url,\n \"resume\": {\"run_id\": self.resume_from} if self.resume_from else None,\n \"branches\": (\n [\n (fn, n, br.decode(\"utf-8\") if br else None,\n r.decode(\"utf-8\") if r else None)\n for (fn, n, br, r) in self.branches\n ]\n if self.branches is not None\n else None\n ),\n \"tags\": (\n [(n, r.decode(\"utf-8\")) for (n, r) in self.tags]\n if self.tags is not None\n else None\n ),\n \"revision\": self.revision.decode(\"utf-8\") if self.revision else None,\n \"main_branch_revision\": self.main_branch_revision.decode(\"utf-8\")\n if self.main_branch_revision\n else None,\n }\n\n\ndef committer_env(committer: str) -> dict[str, str]:\n env: dict[str, str] = {}\n if not committer:\n return env\n (user, email) = parseaddr(committer)\n if user:\n env[\"DEBFULLNAME\"] = user\n if email:\n env[\"DEBEMAIL\"] = email\n env[\"COMMITTER\"] = committer\n env[\"BRZ_EMAIL\"] = committer\n env[\"GIT_COMMITTER_NAME\"] = user\n env[\"GIT_COMMITTER_EMAIL\"] = email\n env[\"GIT_AUTHOR_NAME\"] = user\n env[\"GIT_AUTHOR_EMAIL\"] = email\n env[\"EMAIL\"] = email\n return env\n\n\n@dataclass\nclass WorkerResult:\n \"\"\"The result from a worker.\"\"\"\n\n code: str\n description: Optional[str]\n context: Any\n codemod: Optional[Any] = None\n main_branch_revision: Optional[bytes] = None\n revision: Optional[bytes] = None\n value: Optional[int] = None\n branches: Optional[list[\n tuple[Optional[str], Optional[str],\n Optional[bytes], Optional[bytes]]]] = None\n tags: Optional[list[tuple[str, Optional[bytes]]]] = None\n remotes: Optional[dict[str, dict[str, Any]]] = None\n details: Any = None\n stage: Optional[str] = None\n builder_result: Any = None\n start_time: Optional[datetime] = None\n finish_time: Optional[datetime] = None\n queue_id: Optional[int] = None\n worker_name: Optional[str] = None\n refreshed: bool = False\n target_branch_url: Optional[str] = None\n branch_url: Optional[str] = None\n vcs_type: Optional[str] = None\n subpath: Optional[str] = None\n transient: Optional[bool] = None\n codebase: Optional[str] = None\n\n @classmethod\n def from_file(cls, path):\n \"\"\"Create a WorkerResult object from a JSON file.\"\"\"\n with open(path) as f:\n worker_result = json.load(f)\n return cls.from_json(worker_result)\n\n @classmethod\n def from_json(cls, worker_result):\n main_branch_revision = worker_result.get(\"main_branch_revision\")\n if main_branch_revision is not None:\n main_branch_revision = main_branch_revision.encode(\"utf-8\")\n revision = worker_result.get(\"revision\")\n if revision is not None:\n revision = revision.encode(\"utf-8\")\n branches = worker_result.get(\"branches\")\n tags = worker_result.get(\"tags\")\n if branches:\n branches = [\n (fn, n, br.encode(\"utf-8\") if br else None,\n r.encode(\"utf-8\") if r else None)\n for (fn, n, br, r) in branches\n ]\n if tags:\n tags = [(n, r.encode(\"utf-8\")) for (n, r) in tags]\n target_kind = worker_result.get(\"target\", {\"name\": None})[\"name\"]\n 
for result_cls in RESULT_CLASSES:\n if target_kind == result_cls.kind:\n target_details = worker_result[\"target\"][\"details\"]\n if target_details is not None:\n builder_result = result_cls.from_json(target_details)\n else:\n builder_result = None\n break\n else:\n if target_kind is None:\n builder_result = None\n else:\n raise NotImplementedError('unsupported build target %r' % target_kind)\n return cls(\n code=worker_result.get(\"code\", \"missing-result-code\"),\n description=worker_result.get(\"description\"),\n context=worker_result.get(\"context\"),\n codemod=worker_result.get(\"codemod\"),\n main_branch_revision=main_branch_revision,\n revision=revision,\n value=int(worker_result[\"value\"]) if worker_result.get(\"value\") else None,\n branches=branches,\n tags=tags,\n remotes=worker_result.get(\"remotes\"),\n details=worker_result.get(\"details\"),\n stage=worker_result.get(\"stage\"),\n builder_result=builder_result,\n start_time=datetime.fromisoformat(worker_result['start_time'])\n if 'start_time' in worker_result else None,\n finish_time=datetime.fromisoformat(worker_result['finish_time'])\n if 'finish_time' in worker_result else None,\n queue_id=(\n int(worker_result[\"queue_id\"])\n if \"queue_id\" in worker_result else None),\n worker_name=worker_result.get(\"worker_name\"),\n refreshed=worker_result.get(\"refreshed\", False),\n target_branch_url=worker_result.get(\"target_branch_url\", None),\n branch_url=worker_result.get(\"branch_url\"),\n subpath=worker_result.get(\"subpath\"),\n vcs_type=worker_result.get(\"vcs_type\"),\n transient=worker_result.get(\"transient\"),\n codebase=worker_result.get(\"codebase\"),\n )\n\n\ndef is_log_filename(name):\n parts = name.split(\".\")\n return parts[-1] == \"log\" or (\n len(parts) == 3 and parts[-2] == \"log\" and parts[-1].isdigit())\n\n\ndef gather_logs(output_directory: str) -> Iterator[os.DirEntry]:\n \"\"\"Scan a directory for log files.\n\n Args:\n output_directory: Directory to scan\n Returns:\n Iterator over DirEntry objects matching logs\n \"\"\"\n for entry in os.scandir(output_directory):\n if entry.is_dir():\n continue\n if is_log_filename(entry.name):\n yield entry\n\n\nclass PingFailure(Exception):\n \"\"\"Failure to ping the job.\"\"\"\n\n def __init__(self, reason) -> None:\n self.reason = reason\n\n\nclass PingTimeout(PingFailure):\n \"\"\"Timeout while pinging job.\"\"\"\n\n\nclass PingFatalFailure(PingFailure):\n \"\"\"Failure to ping the job that's not retriable.\"\"\"\n\n\nclass Backchannel:\n\n async def kill(self) -> None:\n raise NotImplementedError(self.kill)\n\n async def list_log_files(self):\n raise NotImplementedError(self.list_log_files)\n\n async def get_log_file(self, name):\n raise NotImplementedError(self.get_log_file)\n\n async def ping(self, log_id: str) -> None:\n raise NotImplementedError(self.ping)\n\n def json(self):\n return {}\n\n\nclass VcsInfo(TypedDict, total=False):\n vcs_type: str\n branch_url: str\n subpath: str\n\n\nclass ActiveRun:\n\n worker_name: str\n worker_link: Optional[str]\n queue_item: QueueItem\n queue_id: int\n log_id: str\n start_time: datetime\n finish_time: Optional[datetime]\n estimated_duration: Optional[timedelta]\n campaign: str\n change_set: Optional[str]\n command: str\n backchannel: Backchannel\n vcs_info: VcsInfo\n\n def __init__(\n self,\n *,\n campaign: str,\n codebase: str,\n change_set: Optional[str],\n command: str,\n instigated_context: Any,\n estimated_duration: Optional[timedelta],\n queue_id: int,\n log_id: str,\n start_time: datetime,\n vcs_info: 
VcsInfo,\n backchannel: Optional[Backchannel],\n worker_name: str,\n worker_link: Optional[str] = None,\n resume_from: Optional[str] = None,\n ) -> None:\n self.campaign = campaign\n self.change_set = change_set\n self.command = command\n self.instigated_context = instigated_context\n self.estimated_duration = estimated_duration\n self.queue_id = queue_id\n self.start_time = start_time\n self.log_id = log_id\n self.worker_name = worker_name\n self.vcs_info = vcs_info\n self.backchannel = backchannel or Backchannel()\n self.worker_link = worker_link\n self.resume_from = resume_from\n self._watch_dog = None\n self.codebase = codebase\n\n @classmethod\n def from_queue_item(\n cls,\n queue_item: QueueItem,\n vcs_info: VcsInfo,\n backchannel: Optional[Backchannel],\n worker_name: str,\n worker_link: Optional[str] = None,\n ):\n return cls(\n campaign=queue_item.campaign,\n codebase=queue_item.codebase,\n change_set=queue_item.change_set,\n command=queue_item.command,\n instigated_context=queue_item.context,\n estimated_duration=queue_item.estimated_duration,\n queue_id=queue_item.id,\n start_time=datetime.utcnow(),\n log_id=str(uuid.uuid4()),\n backchannel=backchannel,\n vcs_info=vcs_info,\n worker_name=worker_name,\n worker_link=worker_link)\n\n @classmethod\n def from_json(cls, js):\n backchannel: Backchannel\n if 'jenkins' in js['backchannel']:\n backchannel = JenkinsBackchannel.from_json(js['backchannel'])\n elif 'my_url' in js['backchannel']:\n backchannel = PollingBackchannel.from_json(js['backchannel'])\n else:\n backchannel = Backchannel()\n return cls(\n campaign=js['campaign'],\n start_time=datetime.fromisoformat(js['start_time']),\n change_set=js['change_set'],\n command=js['command'],\n instigated_context=js['instigated_context'],\n estimated_duration=(\n timedelta(seconds=js['estimated_duration'])\n if js.get('estimated_duration') else None),\n queue_id=js['queue_id'],\n log_id=js['id'],\n backchannel=backchannel,\n vcs_info=cast(VcsInfo, (js['vcs'] or {})),\n worker_name=js['worker'],\n worker_link=js['worker_link'],\n codebase=js.get('codebase'),\n resume_from=js.get('resume_from'),\n )\n\n @property\n def current_duration(self):\n return datetime.utcnow() - self.start_time\n\n def create_result(self, **kwargs):\n return JanitorResult(\n campaign=self.campaign,\n start_time=self.start_time,\n finish_time=datetime.utcnow(),\n log_id=self.log_id,\n worker_name=self.worker_name,\n resume_from=self.resume_from,\n change_set=self.change_set,\n codebase=self.codebase,\n **kwargs)\n\n async def ping(self):\n await self.backchannel.ping(self.log_id)\n return True\n\n @property\n def vcs_type(self) -> Optional[str]:\n return self.vcs_info.get(\"vcs_type\")\n\n @property\n def main_branch_url(self) -> Optional[str]:\n return self.vcs_info.get(\"branch_url\")\n\n @property\n def subpath(self) -> Optional[str]:\n return self.vcs_info.get(\"subpath\")\n\n def __eq__(self, other):\n if not isinstance(other, type(self)):\n return False\n this_json = self.json()\n del this_json['current_duration']\n other_json = other.json()\n del other_json['current_duration']\n return this_json == other_json\n\n def json(self) -> Any:\n \"\"\"Return a JSON representation.\"\"\"\n return {\n \"queue_id\": self.queue_id,\n \"id\": self.log_id,\n \"codebase\": self.codebase,\n \"change_set\": self.change_set,\n \"campaign\": self.campaign,\n \"command\": self.command,\n \"estimated_duration\": self.estimated_duration.total_seconds()\n if self.estimated_duration\n else None,\n \"current_duration\": 
self.current_duration.total_seconds(),\n \"start_time\": self.start_time.isoformat(),\n \"worker\": self.worker_name,\n \"worker_link\": self.worker_link,\n \"vcs\": self.vcs_info,\n \"backchannel\": self.backchannel.json(),\n \"instigated_context\": self.instigated_context,\n \"resume_from\": self.resume_from,\n }\n\n\nclass JenkinsBackchannel(Backchannel):\n\n KEEPALIVE_TIMEOUT = 60\n\n def __init__(self, my_url: URL, metadata=None) -> None:\n self.my_url = my_url\n self._metadata = metadata\n\n @classmethod\n def from_json(cls, js):\n return cls(\n my_url=URL(js['my_url']),\n metadata=js['jenkins']\n )\n\n def __repr__(self) -> str:\n return f\"<{type(self).__name__}({self.my_url!r})>\"\n\n async def kill(self) -> None:\n raise NotImplementedError(self.kill)\n\n async def list_log_files(self):\n return ['worker.log']\n\n async def get_log_file(self, name):\n if name != 'worker.log':\n raise FileNotFoundError(name)\n async with ClientSession() as session, \\\n session.get(\n self.my_url / 'logText/progressiveText',\n raise_for_status=True) as resp:\n return BytesIO(await resp.read())\n\n async def _get_job(self, session):\n url = self.my_url / 'api/json'\n logging.info('Fetching Jenkins URL %s', url)\n async with session.get(\n url, raise_for_status=True,\n timeout=ClientTimeout(self.KEEPALIVE_TIMEOUT)) as resp:\n return await resp.json()\n\n async def ping(self, expected_log_id):\n async with ClientSession() as session:\n try:\n job = await self._get_job(session)\n except (ClientConnectorError, ServerDisconnectedError,\n asyncio.TimeoutError, ClientOSError) as e:\n raise PingTimeout(f'Failed to ping client {self.my_url}: {e}') from e\n except ClientResponseError as e:\n if e.status == 404:\n raise PingFatalFailure(f'Jenkins job {self.my_url} has disappeared') from e\n else:\n raise PingFailure(f'Failed to ping client {self.my_url}: {e}') from e\n else:\n # If Jenkins has listed the job as having failed, then we can't\n # expect anything to be uploaded\n if job.get('result') == 'FAILURE':\n raise PingFatalFailure(f'Jenkins lists job {job[\"id\"]} for run {expected_log_id} as failed')\n\n def json(self):\n return {\n 'my_url': str(self.my_url),\n 'jenkins': self._metadata,\n }\n\n\nclass PollingBackchannel(Backchannel):\n\n KEEPALIVE_TIMEOUT = 60\n\n def __init__(self, my_url: URL) -> None:\n self.my_url = my_url\n\n @classmethod\n def from_json(cls, js):\n return cls(\n my_url=URL(js['my_url']),\n )\n\n def __repr__(self) -> str:\n return f\"<{type(self).__name__}({self.my_url!r})>\"\n\n async def kill(self) -> None:\n async with ClientSession() as session, \\\n session.post(\n self.my_url / 'kill', headers={\n 'Accept': 'application/json'},\n raise_for_status=True):\n pass\n\n async def list_log_files(self):\n # TODO(jelmer)\n async with ClientSession() as session, \\\n session.get(\n self.my_url / 'logs', headers={\n 'Accept': 'application/json'},\n raise_for_status=True) as resp:\n return await resp.json()\n\n async def get_log_file(self, name):\n async with ClientSession() as session, \\\n session.get(\n self.my_url / 'logs' / name,\n raise_for_status=True) as resp:\n return BytesIO(await resp.read())\n\n async def ping(self, expected_log_id):\n health_url = self.my_url / 'log-id'\n logging.info('Pinging URL %s', health_url, extra={'run_id': expected_log_id})\n async with ClientSession() as session:\n try:\n async with session.get(\n health_url, raise_for_status=True,\n timeout=ClientTimeout(self.KEEPALIVE_TIMEOUT)) as resp:\n log_id = (await resp.read()).decode()\n except 
(ClientConnectorError, ClientResponseError,\n asyncio.TimeoutError, ClientOSError,\n ServerDisconnectedError) as err:\n raise PingTimeout(f'Failed to ping client {self.my_url}: {err}') from err\n\n if log_id != expected_log_id:\n raise PingFatalFailure(\n f'Worker started processing new run {log_id} rather than {expected_log_id}')\n\n def json(self):\n return {\n 'my_url': str(self.my_url),\n }\n\n\ndef _parse_unexpected_http_status(e):\n if e.code == 429:\n try:\n retry_after = int(e.headers['Retry-After']) # type: ignore\n except TypeError:\n logging.warning(\n 'Unable to parse retry-after header: %s',\n e.headers['Retry-After']) # type: ignore\n retry_after = None\n else:\n retry_after = None\n raise BranchRateLimited(e.path, str(e), retry_after=retry_after) from e\n\n\ndef open_resume_branch(\n main_branch: Branch, campaign_name: str, package: str,\n possible_forges: Optional[list[Forge]] = None) -> Optional[Branch]:\n try:\n forge = get_forge(main_branch, possible_forges=possible_forges)\n except UnsupportedForge as e:\n # We can't figure out what branch to resume from when there's\n # no forge that can tell us.\n logging.warning(\"Unsupported forge (%s)\", e)\n return None\n except ForgeLoginRequired as e:\n logging.warning(\"No credentials for forge (%s)\", e)\n return None\n except (ssl.SSLCertVerificationError, ssl.SSLZeroReturnError) as e:\n logging.warning(\"SSL error probing for forge (%s)\", e)\n return None\n except ConnectionError as e:\n logging.warning(\"Connection error opening resume branch (%s)\", e)\n return None\n except UnexpectedHttpStatus as e:\n _parse_unexpected_http_status(e)\n raise e\n else:\n try:\n for option in [campaign_name, ('%s/main' % campaign_name), (f'{campaign_name}/main/{package}')]:\n (\n resume_branch,\n unused_overwrite,\n unused_existing_proposals,\n ) = find_existing_proposed(\n main_branch, forge, option,\n preferred_schemes=['https', 'git', 'bzr'])\n if resume_branch:\n break\n except NoSuchProject as e:\n logging.warning(\"Project %s not found\", e.project)\n return None\n except PermissionDenied as e:\n logging.warning(\"Unable to list existing proposals: %s\", e)\n return None\n except UnusableRedirect as e:\n logging.warning(\"Unable to list existing proposals: %s\", e)\n return None\n except UnexpectedHttpStatus as e:\n _parse_unexpected_http_status(e)\n logging.warning(\n 'Unexpected HTTP status for %s: %s %s', e.path,\n e.code, e.extra)\n # TODO(jelmer): Considering re-raising here for some errors?\n return None\n else:\n return resume_branch\n\n\nasync def check_resume_result(\n conn: asyncpg.Connection, campaign: str,\n resume_branch: Branch) -> Optional[\"ResumeInfo\"]:\n row = await conn.fetchrow(\n \"SELECT id, result, publish_status, \"\n \"array(SELECT row(role, remote_name, base_revision, revision) \"\n \"FROM new_result_branch WHERE run_id = run.id) AS result_branches \"\n \"FROM run \"\n \"WHERE suite = $1 AND revision = $2 AND result_code = 'success' \"\n \"ORDER BY finish_time DESC LIMIT 1\",\n campaign,\n resume_branch.last_revision().decode(\"utf-8\"),\n )\n if row is not None:\n resume_run_id = row['id']\n resume_branch_result = row['result']\n resume_publish_status = row['publish_status']\n resume_result_branches = [\n (role, name,\n base_revision.encode(\"utf-8\") if base_revision else None,\n revision.encode(\"utf-8\") if revision else None)\n for (role, name, base_revision, revision) in row['result_branches']]\n else:\n logging.warning(\n 'Unable to find resume branch %r in database',\n resume_branch)\n return 
None\n if resume_publish_status == \"rejected\":\n logging.info(\"Unsetting resume branch, since last run was rejected.\")\n return None\n return ResumeInfo(\n run_id=resume_run_id, branch=resume_branch, result=resume_branch_result,\n result_branches=resume_result_branches or [])\n\n\nclass ResumeInfo:\n def __init__(self, *, run_id, branch, result, result_branches) -> None:\n self.run_id = run_id\n self.branch = branch\n self.result = result\n self.resume_result_branches = result_branches\n\n @property\n def resume_branch_url(self):\n return full_branch_url(self.branch)\n\n def json(self):\n return {\n \"run_id\": self.run_id,\n \"result\": self.result,\n \"branch_url\": self.resume_branch_url,\n \"branches\": [\n (fn, n, br.decode(\"utf-8\") if br is not None else None,\n r.decode(\"utf-8\") if r is not None else None)\n for (fn, n, br, r) in self.resume_result_branches\n ],\n }\n\n\ndef cache_branch_name(distro_config, role):\n if role != 'main':\n raise ValueError(role)\n return \"%s/latest\" % (distro_config.vendor or dpkg_vendor().lower())\n\n\nasync def store_change_set(\n conn: asyncpg.Connection,\n name: str,\n campaign: str):\n await conn.execute(\n \"\"\"INSERT INTO change_set (id, campaign) VALUES ($1, $2)\n ON CONFLICT DO NOTHING\"\"\",\n name, campaign)\n\n\nasync def store_run(\n conn: asyncpg.Connection,\n *,\n run_id: str,\n codebase: str,\n vcs_type: Optional[str],\n branch_url: Optional[str],\n start_time: datetime,\n finish_time: datetime,\n command: str,\n description: Optional[str],\n instigated_context: Optional[str],\n context: Optional[str],\n main_branch_revision: Optional[bytes],\n result_code: str,\n revision: Optional[bytes],\n codemod_result: Optional[Any],\n campaign: str,\n logfilenames: list[str],\n value: Optional[int],\n worker_name: Optional[str] = None,\n subpath: Optional[str] = \"\",\n result_branches: Optional[list[tuple[str, str, bytes, bytes]]] = None,\n result_tags: Optional[list[tuple[str, bytes]]] = None,\n resume_from: Optional[str] = None,\n failure_details: Optional[Any] = None,\n failure_stage: Optional[str] = None,\n target_branch_url: Optional[str] = None,\n change_set: Optional[str] = None,\n failure_transient: Optional[bool] = None,\n):\n \"\"\"Store a run in the database.\"\"\"\n if result_tags is None:\n result_tags_updated = None\n else:\n result_tags_updated = [(n, r.decode(\"utf-8\")) for (n, r) in result_tags]\n\n await conn.execute(\n \"INSERT INTO run (id, command, description, result_code, \"\n \"start_time, finish_time, instigated_context, context, \"\n \"main_branch_revision, \"\n \"revision, result, suite, vcs_type, branch_url, subpath, logfilenames, \"\n \"value, worker, result_tags, \"\n \"resume_from, failure_details, failure_stage, target_branch_url, change_set, \"\n \"failure_transient, codebase) \"\n \"VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, \"\n \"$12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, \"\n \"$24, $25, $26)\",\n run_id,\n command,\n description,\n result_code,\n start_time,\n finish_time,\n instigated_context,\n context,\n main_branch_revision.decode(\"utf-8\") if main_branch_revision else None,\n revision.decode(\"utf-8\") if revision else None,\n codemod_result if codemod_result else None,\n campaign,\n vcs_type,\n branch_url,\n subpath,\n logfilenames,\n value,\n worker_name,\n result_tags_updated,\n resume_from,\n failure_details,\n failure_stage,\n target_branch_url,\n change_set,\n failure_transient,\n codebase,\n )\n\n if result_branches:\n roles = [role for (role, remote_name, 
br, r) in result_branches]\n assert len(roles) == len(set(roles)), \"Duplicate result branches: %r\" % result_branches\n await conn.executemany(\n \"INSERT INTO new_result_branch \"\n \"(run_id, role, remote_name, base_revision, revision) \"\n \"VALUES ($1, $2, $3, $4, $5)\",\n [\n (run_id, role, remote_name, br.decode(\"utf-8\") if br else None, r.decode(\"utf-8\") if r else None)\n for (role, remote_name, br, r) in result_branches\n ],\n )\n\n\nclass RunExists(Exception):\n \"\"\"Run already exists.\"\"\"\n\n def __init__(self, run_id) -> None:\n self.run_id = run_id\n\n\nclass QueueItemAlreadyClaimed(Exception):\n \"\"\"Queue item has been claimed by another run.\"\"\"\n\n def __init__(self, queue_id, run_id) -> None:\n self.queue_id = queue_id\n self.run_id = run_id\n\n\nclass QueueProcessor:\n\n avoid_hosts: set[str]\n\n def __init__(\n self,\n database: asyncpg.pool.Pool,\n redis,\n run_timeout: int,\n logfile_manager: LogFileManager,\n artifact_manager: Optional[ArtifactManager] = None,\n public_vcs_managers: Optional[dict[str, VcsManager]] = None,\n use_cached_only: bool = False,\n committer: Optional[str] = None,\n backup_artifact_manager: Optional[ArtifactManager] = None,\n backup_logfile_manager: Optional[LogFileManager] = None,\n avoid_hosts: Optional[set[str]] = None,\n dep_server_url: Optional[str] = None,\n apt_archive_url: Optional[str] = None,\n ) -> None:\n \"\"\"Create a queue processor.\"\"\"\n self.database = database\n self.redis = redis\n self.logfile_manager = logfile_manager\n self.artifact_manager = artifact_manager\n self.public_vcs_managers = public_vcs_managers\n self.use_cached_only = use_cached_only\n self.committer = committer\n self.backup_artifact_manager = backup_artifact_manager\n self.backup_logfile_manager = backup_logfile_manager\n self.run_timeout = run_timeout\n self.dep_server_url = dep_server_url\n self.avoid_hosts = avoid_hosts or set()\n self.apt_archive_url = apt_archive_url\n self._jobs_scheduler = aiojobs.Scheduler(limit=2)\n self._watch_dog: Optional[asyncio.Task] = None\n\n def start_watchdog(self):\n if self._watch_dog is not None:\n raise Exception(\"Watchdog already started\")\n loop = asyncio.get_event_loop()\n self._watch_dog = loop.create_task(self._watchdog())\n\n def log_result(future):\n try:\n future.result()\n except BaseException:\n logging.exception('watch dog failed')\n else:\n logging.error('watch dog completed?')\n self._watch_dog.add_done_callback(log_result)\n\n def stop_watchdog(self):\n if self._watch_dog is None:\n return\n try:\n self._watch_dog.cancel()\n except asyncio.CancelledError:\n pass\n self._watch_dog = None\n\n async def stop(self):\n self.stop_watchdog()\n await self._jobs_scheduler.close()\n\n KEEPALIVE_INTERVAL = 10\n\n async def _healthcheck_active_run(self, active_run, keepalive_age):\n try:\n await active_run.ping()\n except NotImplementedError:\n if keepalive_age > timedelta(days=1):\n try:\n await self.abort_run(\n active_run, 'run-disappeared',\n \"no support for ping, and haven't heard back in > 1 day\", transient=True)\n except RunExists:\n logging.warning('Run exists. 
Not properly cleaned up?', extra={'run_id': active_run.log_id})\n return\n except PingFatalFailure as e:\n try:\n await self.abort_run(active_run, 'run-disappeared', e.reason,\n transient=True)\n except RunExists:\n logging.warning('Run not properly cleaned up?', extra={'run_id': active_run.log_id})\n return\n except PingFailure as e:\n logging.warning(\n 'Failed to ping %s: %s', active_run.log_id, e, extra={'run_id': active_run.log_id})\n else:\n await self.redis.hset(\n 'last-keepalive', active_run.log_id,\n datetime.utcnow().isoformat())\n keepalive_age = timedelta(seconds=0)\n\n if keepalive_age > timedelta(minutes=self.run_timeout):\n logging.warning(\n \"No keepalives received from %s for %s in %s, aborting.\",\n active_run.worker_name,\n active_run.log_id,\n keepalive_age, extra={'run_id': active_run.log_id}\n )\n try:\n await self.abort_run(\n active_run, code='worker-timeout',\n description=(\"No keepalives received in %s.\" % keepalive_age),\n transient=True)\n except RunExists:\n logging.warning('Run exists. Not properly cleaned up?', extra={'run_id': active_run.log_id})\n return\n\n async def _watchdog(self):\n while True:\n # TODO(jelmer): Use asyncio.TaskGroup when python >= 3.11\n tasks = []\n for serialized in (await self.redis.hgetall('active-runs')).values():\n js = json.loads(serialized)\n active_run = ActiveRun.from_json(js)\n lk = await self.redis.hget('last-keepalive', active_run.log_id)\n if lk:\n last_keepalive = datetime.fromisoformat(lk.decode('utf-8'))\n else:\n last_keepalive = active_run.start_time\n keepalive_age = datetime.utcnow() - last_keepalive\n if keepalive_age < timedelta(minutes=(self.run_timeout // 3)):\n continue\n tasks.append(asyncio.create_task(self._healthcheck_active_run(\n active_run, keepalive_age)))\n if tasks:\n done, _ = await asyncio.wait(tasks)\n for task in done:\n try:\n await task\n except Exception as e:\n logging.exception(\n 'Failed to healthcheck %s: %r', active_run.log_id, e)\n await asyncio.sleep(self.KEEPALIVE_INTERVAL)\n\n async def rate_limited_hosts(self):\n for h, t in (await self.redis.hgetall('rate-limit-hosts')).items():\n dt = datetime.fromisoformat(t.decode('utf-8'))\n if dt > datetime.utcnow():\n yield h.decode('utf-8'), dt\n\n async def active_run_count(self):\n return await self.redis.hlen('active-runs')\n\n async def estimate_wait(\n self, codebase: str, campaign: str) -> tuple[\n Optional[int], Optional[timedelta], Optional[timedelta]]:\n async with self.database.acquire() as conn:\n queue = Queue(conn)\n (position, wait_time) = await queue.get_position(\n campaign, codebase)\n active_run_count = await self.active_run_count()\n return (position,\n (wait_time / active_run_count)\n if wait_time is not None else None,\n wait_time)\n\n async def status_json(self) -> Any:\n last_keepalives = {\n r.decode('utf-8'): datetime.fromisoformat(v.decode('utf-8'))\n for (r, v) in (await self.redis.hgetall('last-keepalive')).items()}\n processing = []\n for e in (await self.redis.hgetall('active-runs')).values():\n js = json.loads(e)\n last_keepalive = last_keepalives.get(js['id'])\n if last_keepalive:\n js['last-keepalive'] = last_keepalive.isoformat(timespec='seconds')\n js['keepalive_age'] = (datetime.utcnow() - last_keepalive).total_seconds()\n js['mia'] = js['keepalive_age'] > self.run_timeout * 60\n else:\n js['keepalive_age'] = None\n js['last-keepalive'] = None\n js['mia'] = None\n processing.append(js)\n return {\n \"processing\": processing,\n \"avoid_hosts\": list(self.avoid_hosts),\n \"rate_limit_hosts\": {\n h: 
t.isoformat(timespec='seconds')\n async for (h, t) in self.rate_limited_hosts()},\n }\n\n async def register_run(self, active_run: ActiveRun) -> None:\n # Ideally we'd do this check *in* the transaction, but\n # fakeredis doesn't seem to do Pipeline.hget()\n run_id = await self.redis.hget('assigned-queue-items', str(active_run.queue_id))\n if run_id:\n raise QueueItemAlreadyClaimed(active_run.queue_id, run_id)\n async with self.redis.pipeline() as tr:\n tr.hset(\n 'active-runs', active_run.log_id, json.dumps(active_run.json()))\n tr.hset(\n 'assigned-queue-items', str(active_run.queue_id), active_run.log_id)\n tr.hset(\n 'last-keepalive', active_run.log_id, datetime.utcnow().isoformat())\n await tr.execute()\n await self.redis.publish('queue', json.dumps(await self.status_json()))\n active_run_count.labels(worker=active_run.worker_name).inc()\n run_count.inc()\n\n async def update_run(self, active_run: ActiveRun) -> None:\n await self.redis.hset('active-runs', active_run.log_id, json.dumps(active_run.json()))\n\n async def get_run(self, log_id: str) -> Optional[ActiveRun]:\n serialized = await self.redis.hget('active-runs', log_id)\n if not serialized:\n return None\n js = json.loads(serialized)\n return ActiveRun.from_json(js)\n\n async def unclaim_run(self, log_id: str) -> None:\n active_run = await self.get_run(log_id)\n active_run_count.labels(worker=active_run.worker_name if active_run else None).dec()\n if not active_run:\n return\n async with self.redis.pipeline() as tr:\n tr.hdel('assigned-queue-items', str(active_run.queue_id))\n tr.hdel('active-runs', log_id)\n tr.hdel('last-keepalive', log_id)\n await tr.execute()\n\n async def abort_run(self, run: ActiveRun, code: str, description: str, transient=None) -> None:\n result = run.create_result(\n branch_url=run.main_branch_url,\n vcs_type=run.vcs_type,\n description=description,\n code=code,\n logfilenames=[],\n transient=transient\n )\n await self.finish_run(run, result)\n\n async def finish_run(self, active_run: ActiveRun, result: JanitorResult) -> None:\n run_result_count.labels(\n campaign=active_run.campaign,\n result_code=result.code).inc()\n build_duration.labels(campaign=active_run.campaign).observe(\n result.duration.total_seconds()\n )\n async with self.database.acquire() as conn:\n async with conn.transaction():\n if not result.change_set:\n result.change_set = result.log_id\n await store_change_set(\n conn, result.change_set, campaign=result.campaign)\n try:\n await store_run(\n conn,\n run_id=result.log_id,\n vcs_type=result.vcs_type,\n subpath=result.subpath,\n branch_url=result.branch_url,\n start_time=result.start_time,\n finish_time=result.finish_time,\n command=active_run.command,\n description=result.description,\n instigated_context=active_run.instigated_context,\n context=result.context,\n main_branch_revision=result.main_branch_revision,\n result_code=result.code,\n revision=result.revision,\n codemod_result=result.codemod_result,\n campaign=active_run.campaign,\n logfilenames=result.logfilenames,\n value=result.value,\n worker_name=result.worker_name,\n result_branches=result.branches,\n result_tags=result.tags,\n failure_details=result.failure_details,\n failure_stage=result.failure_stage,\n resume_from=result.resume_from,\n target_branch_url=result.target_branch_url,\n change_set=result.change_set,\n failure_transient=result.transient,\n codebase=result.codebase,\n )\n except asyncpg.UniqueViolationError as e:\n if e.constraint_name == 'run_pkey':\n logging.debug('Unique violation error creating run: %r', 
e, extra={'run_id': active_run.log_id})\n await self.unclaim_run(result.log_id)\n raise RunExists(result.log_id) from e\n raise\n if result.builder_result:\n await result.builder_result.store(conn, result.log_id)\n await conn.execute(\"DELETE FROM queue WHERE id = $1\", active_run.queue_id)\n\n await self.redis.publish('result', json.dumps(result.json()))\n await self.unclaim_run(result.log_id)\n await self.redis.publish('queue', json.dumps(await self.status_json()))\n last_success_gauge.set_to_current_time()\n\n async def reschedule():\n async with self.database.acquire() as schedule_conn:\n try:\n await do_schedule_regular(\n schedule_conn, campaign=active_run.campaign,\n change_set=active_run.change_set, context=result.context,\n requester='after run schedule', codebase=result.codebase)\n except CandidateUnavailable:\n # Maybe this was a one-off schedule without candidate, or\n # the candidate has been removed. Either way, this is fine.\n logging.debug(\n 'not rescheduling %s/%s: no candidate available',\n active_run.codebase, active_run.campaign)\n\n await self._jobs_scheduler.spawn(reschedule())\n\n async def rate_limited(self, host, retry_after):\n rate_limited_count.labels(host=host).inc()\n if not retry_after:\n retry_after = datetime.utcnow() + timedelta(seconds=DEFAULT_RETRY_AFTER)\n await self.redis.hset(\n 'rate-limit-hosts', host, retry_after.isoformat())\n\n async def next_queue_item(\n self, conn, codebase: Optional[str] = None,\n campaign: Optional[str] = None) -> tuple[Optional[QueueItem], dict[str, str]]:\n queue = Queue(conn)\n exclude_hosts = set(self.avoid_hosts)\n async for host, _retry_after in self.rate_limited_hosts():\n exclude_hosts.add(host)\n assigned_queue_items = {\n int(i.decode('utf-8'))\n for i in await self.redis.hkeys('assigned-queue-items')}\n return await queue.next_item(\n campaign=campaign, codebase=codebase,\n assigned_queue_items=assigned_queue_items,\n exclude_hosts=exclude_hosts)\n\n\n@routes.get(\"/queue/position\", name=\"queue-position\")\nasync def handle_queue_position(request):\n span = aiozipkin.request_span(request)\n codebase = request.query['codebase']\n campaign = request.query['campaign']\n with span.new_child('sql:queue-position'):\n (position, wait_time,\n cum_wait_time) = await request.app['queue_processor'].estimate_wait(\n codebase, campaign)\n\n return web.json_response({\n \"position\": position,\n \"wait_time\":\n wait_time.total_seconds() if wait_time is not None else None,\n \"cumulative_wait_time\":\n cum_wait_time.total_seconds()\n if cum_wait_time is not None else None,\n })\n\n\n@routes.post(\"/schedule-control\", name=\"schedule-control\")\nasync def handle_schedule_control(request):\n span = aiozipkin.request_span(request)\n json = await request.json()\n change_set = json.get('change_set')\n offset = json.get('offset')\n requester = json['requester']\n refresh = json.get('refresh', False)\n bucket = json.get('bucket')\n estimated_duration = (\n timedelta(seconds=json['estimated_duration'])\n if json.get('estimated_duration') else None)\n\n async with request.app['database'].acquire() as conn:\n try:\n run_id = json['run_id']\n except KeyError:\n codebase = json['codebase']\n main_branch_revision = json['main_branch_revision'].encode('utf-8')\n else:\n with span.new_child('sql:find-run'):\n run = await conn.fetchrow(\n \"SELECT main_branch_revision, codebase FROM run \"\n \"WHERE id = $1\",\n run_id)\n if run is None:\n return web.json_response({\"reason\": \"Run not found\"}, status=404)\n codebase = run['codebase']\n 
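# --- Editor's illustrative sketch (not part of the original runner.py): a
# client-side call against the /schedule-control handler above. The base URL
# and requester string are hypothetical; the JSON fields are the ones the
# handler reads ('requester' is required, 'run_id' is optional).
import aiohttp  # assumed available; any HTTP client works

async def schedule_control(base_url: str, run_id: str) -> dict:
    async with aiohttp.ClientSession() as session:
        async with session.post(
                f"{base_url}/schedule-control",
                json={"run_id": run_id, "requester": "example-user"}) as resp:
            resp.raise_for_status()
            return await resp.json()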
main_branch_revision = run['main_branch_revision'].encode('utf-8')\n with span.new_child('do-schedule-control'):\n offset, estimated_duration, queue_id, bucket = await do_schedule_control(\n conn,\n change_set=change_set,\n main_branch_revision=main_branch_revision,\n offset=offset,\n refresh=refresh,\n bucket=bucket,\n requester=requester,\n codebase=codebase,\n estimated_duration=estimated_duration)\n\n response_obj = {\n \"campaign\": \"control\",\n \"offset\": offset,\n \"bucket\": bucket,\n \"codebase\": codebase,\n \"queue_id\": queue_id,\n \"estimated_duration_seconds\":\n estimated_duration.total_seconds() if estimated_duration else None,\n }\n return web.json_response(response_obj)\n\n\n@routes.post(\"/schedule\", name=\"schedule\")\nasync def handle_schedule(request):\n span = aiozipkin.request_span(request)\n json = await request.json()\n async with request.app['database'].acquire() as conn:\n try:\n run_id = json['run_id']\n except KeyError:\n campaign = json['campaign']\n codebase = json['codebase']\n run = None\n else:\n run = await conn.fetchrow(\n \"SELECT suite AS campaign, codebase, command FROM run WHERE id = $1\",\n run_id)\n if run is None:\n return web.json_response({\"reason\": \"Run not found\"}, status=404)\n campaign = run['campaign']\n codebase = run['codebase']\n refresh = json.get('refresh', False)\n change_set = json.get('change_set')\n requester = json.get('requester')\n bucket = json.get('bucket')\n offset = json.get('offset')\n estimated_duration = (\n timedelta(seconds=json['estimated_duration'])\n if json.get('estimated_duration') else None)\n command = await conn.fetchval(\n \"SELECT command \"\n \"FROM candidate WHERE codebase = $1 AND suite = $2\",\n codebase, campaign)\n if command is None:\n command = get_campaign_config(\n request.app['config'], campaign).command\n if command is None and run is not None:\n command = run['command']\n if command is None:\n raise web.HTTPBadRequest(text=\"no command specified\")\n\n try:\n with span.new_child('do-schedule'):\n offset, estimated_duration, queue_id, bucket = await do_schedule(\n conn,\n campaign=campaign,\n offset=offset,\n change_set=change_set,\n refresh=refresh,\n requester=requester,\n estimated_duration=estimated_duration,\n codebase=codebase,\n command=command,\n bucket=bucket)\n except CandidateUnavailable as e:\n raise web.HTTPBadRequest(text=\"Candidate not available\") from e\n\n response_obj = {\n \"campaign\": campaign,\n \"offset\": offset,\n \"bucket\": bucket,\n \"codebase\": codebase,\n \"queue_id\": queue_id,\n \"estimated_duration_seconds\":\n estimated_duration.total_seconds() if estimated_duration else None,\n }\n return web.json_response(response_obj)\n\n\n@routes.get(\"/status\", name=\"status\")\nasync def handle_status(request):\n queue_processor = request.app['queue_processor']\n return web.json_response(await queue_processor.status_json())\n\n\nasync def _find_active_run(request):\n queue_processor = request.app['queue_processor']\n run_id = request.match_info[\"run_id\"]\n queue_id = request.query.get('queue_id') # noqa: F841\n worker_name = request.query.get('worker_name') # noqa: F841\n active_run = await queue_processor.get_run(run_id)\n if not active_run:\n raise web.HTTPNotFound(text=\"No such current run: %s\" % run_id)\n return active_run\n\n\n@routes.get(\"/log/{run_id}\", name=\"log-index\")\nasync def handle_log_index(request):\n active_run = await _find_active_run(request)\n log_filenames = await active_run.backchannel.list_log_files()\n return 
web.json_response(log_filenames)\n\n\n@routes.post(\"/kill/{run_id}\", name=\"kill\")\nasync def handle_kill(request):\n active_run = await _find_active_run(request)\n ret = active_run.json()\n try:\n await active_run.backchannel.kill()\n except NotImplementedError as e:\n raise web.HTTPNotImplemented(\n text='kill not supported for this type of run') from e\n else:\n return web.json_response(ret)\n\n\n@routes.get(\"/log/{run_id}/{filename}\", name=\"log\")\nasync def handle_log(request):\n queue_processor = request.app['queue_processor']\n run_id = request.match_info[\"run_id\"]\n filename = request.match_info[\"filename\"]\n\n if \"/\" in filename:\n return web.Response(text=\"Invalid filename %s\" % filename, status=400)\n active_run = await queue_processor.get_run(run_id)\n if not active_run:\n return web.Response(text=\"No such current run: %s\" % run_id, status=404)\n try:\n f = await active_run.backchannel.get_log_file(filename)\n except FileNotFoundError:\n return web.Response(text=\"No such log file: %s\" % filename, status=404)\n\n try:\n response = web.StreamResponse(\n status=200, reason=\"OK\", headers={\"Content-Type\": \"text/plain\"}\n )\n await response.prepare(request)\n for chunk in f:\n await response.write(chunk)\n await response.write_eof()\n finally:\n f.close()\n return response\n\n\n@routes.get(\"/codebases\", name=\"download-codebases\")\nasync def handle_codebases_download(request):\n queue_processor = request.app['queue_processor']\n codebases = []\n\n async with queue_processor.database.acquire() as conn:\n for row in await conn.fetch(\n 'SELECT name, branch_url, url, branch, subpath, vcs_type, '\n 'web_url, vcs_last_revision, value FROM codebase'):\n codebases.append(dict(row))\n\n return web.json_response(codebases)\n\n\n@routes.post(\"/codebases\", name=\"upload-codebases\")\nasync def handle_codebases_upload(request):\n queue_processor = request.app['queue_processor']\n\n async with queue_processor.database.acquire() as conn:\n # TODO(jelmer): When a codebase with a certain name already exists,\n # steal its name\n insert_codebase_stmt = await conn.prepare(\n \"INSERT INTO codebase \"\n \"(name, branch_url, url, branch, subpath, vcs_type, \"\n \"vcs_last_revision, value, web_url) \"\n \"VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)\"\n \"ON CONFLICT (name) DO UPDATE SET \"\n \"branch_url = EXCLUDED.branch_url, subpath = EXCLUDED.subpath, \"\n \"vcs_type = EXCLUDED.vcs_type, \"\n \"vcs_last_revision = EXCLUDED.vcs_last_revision, \"\n \"value = EXCLUDED.value, url = EXCLUDED.url, \"\n \"branch = EXCLUDED.branch, web_url = EXCLUDED.web_url\")\n\n async with conn.transaction():\n for entry in await request.json():\n if 'branch_url' in entry:\n entry['url'], params = urlutils.split_segment_parameters(\n entry['branch_url'])\n if 'branch' in params:\n entry['branch'] = urlutils.unescape(params['branch'])\n elif 'branch' in entry:\n entry['branch_url'] = urlutils.join_segment_parameters(\n entry['url'], {'branch': urlutils.escape(entry['branch'])})\n elif 'url' in entry:\n entry['branch_url'] = entry['url']\n else:\n entry['branch_url'] = entry['url'] = None\n\n await insert_codebase_stmt.fetchrow(\n entry.get('name'), entry['branch_url'], entry['url'],\n entry.get('branch'), entry.get('subpath'),\n entry.get('vcs_type'), entry.get('vcs_last_revision'),\n entry.get('value'), entry.get('web_url'))\n\n # TODO(jelmer): if anything meaningful has changed (name,\n # branch_url, subpath), reschedule all runs for this codebase:\n # 
https://github.com/jelmer/janitor/issues/107\n\n return web.json_response({})\n\n\n@routes.delete(\"/candidates/{id}\", name=\"delete-candidate\")\nasync def handle_candidate_delete(request):\n queue_processor = request.app['queue_processor']\n candidate_id = int(request.match_info['id'])\n async with queue_processor.database.acquire() as conn, conn.transaction():\n await conn.fetchrow(\n 'DELETE FROM followup WHERE candidate = $1', candidate_id)\n (suite, codebase) = await conn.fetchrow(\n 'DELETE FROM candidate WHERE id = $1 RETURNING suite, codebase', candidate_id)\n await conn.execute(\n 'DELETE FROM queue WHERE suite = $1 AND codebase = $2',\n suite, codebase)\n return web.json_response({})\n\n\n@routes.get(\"/candidates\", name=\"download-candidates\")\nasync def handle_candidate_download(request):\n queue_processor = request.app['queue_processor']\n ret = []\n async with queue_processor.database.acquire() as conn:\n for row in await conn.fetch('SELECT * FROM candidate'):\n ret.append({\n 'id': row['id'],\n 'codebase': row['codebase'],\n 'campaign': row['suite'],\n 'command': row['command'],\n 'publish-policy': row['publish_policy'],\n 'change_set': row['change_set'],\n 'context': row['context'],\n 'value': row['value'],\n 'success_chance': row['success_chance'],\n })\n return web.json_response(ret)\n\n\n@routes.post(\"/candidates\", name=\"upload-candidates\")\nasync def handle_candidates_upload(request):\n span = aiozipkin.request_span(request)\n unknown_codebases = []\n unknown_campaigns = []\n invalid_command = []\n invalid_value = []\n unknown_publish_policies = []\n queue_processor = request.app['queue_processor']\n async with queue_processor.database.acquire() as conn:\n existing_runs_stmt = await conn.prepare(\n \"SELECT merge_proposal.url AS mp_url, \"\n \"last_effective_runs.command AS command \"\n \"FROM last_effective_runs \"\n \"LEFT JOIN merge_proposal \"\n \"ON last_effective_runs.revision = merge_proposal.revision \"\n \"WHERE merge_proposal.status = 'open' \"\n \"AND last_effective_runs.codebase = $1 \"\n \"AND last_effective_runs.suite = $2 \"\n \"AND last_effective_runs.command != $3\")\n insert_candidate_stmt = await conn.prepare(\n \"INSERT INTO candidate \"\n \"(suite, command, change_set, context, value, \"\n \"success_chance, publish_policy, codebase) \"\n \"VALUES ($1, $2, $3, $4, $5, $6, $7, $8) \"\n \"ON CONFLICT (codebase, suite, coalesce(change_set, ''::text)) \"\n \"DO UPDATE SET context = EXCLUDED.context, value = EXCLUDED.value, \"\n \"success_chance = EXCLUDED.success_chance, \"\n \"command = EXCLUDED.command, \"\n \"publish_policy = EXCLUDED.publish_policy, \"\n \"codebase = EXCLUDED.codebase RETURNING id\")\n insert_followup_stmt = await conn.prepare(\n \"INSERT INTO followup (origin, candidate) VALUES ($1, $2) \"\n \"ON CONFLICT DO NOTHING\")\n async with conn.transaction():\n known_campaign_names = [\n campaign.name for campaign in request.app['config'].campaign]\n\n ret = []\n with span.new_child('process-candidates'):\n for candidate in (await request.json()):\n tr = conn.transaction()\n await tr.start()\n try:\n try:\n codebase = candidate['codebase']\n except KeyError as e:\n raise web.HTTPBadRequest(\n text=f'no codebase field for candidate {candidate}') from e\n if codebase is None:\n raise web.HTTPBadRequest(\n text=f'codebase field is None for candidate {candidate}')\n try:\n campaign = candidate['campaign']\n except KeyError as e:\n raise web.HTTPBadRequest(\n text=f'no campaign field for candidate {candidate}') from e\n\n if campaign not 
in known_campaign_names:\n logging.warning('unknown campaign %r', campaign)\n unknown_campaigns.append(campaign)\n await tr.rollback()\n continue\n\n command = candidate.get('command')\n if not command:\n try:\n campaign_config = get_campaign_config(\n request.app['config'], campaign)\n except KeyError:\n logging.warning('unknown campaign %r', campaign)\n unknown_campaigns.append(campaign)\n await tr.rollback()\n continue\n command = campaign_config.command\n if not command:\n logging.warning(\n 'No command in candidate or campaign config')\n invalid_command.append(command)\n await tr.rollback()\n continue\n\n publish_policy = candidate.get('publish-policy')\n\n if candidate.get('value') == 0:\n logging.warning(\n 'invalid value for candidate: %r', candidate.get('value'))\n invalid_value.append(candidate.get('value'))\n await tr.rollback()\n continue\n\n with span.new_child('sql:insert-candidates'):\n try:\n candidate_id = await insert_candidate_stmt.fetchval(\n campaign,\n command,\n candidate.get('change_set'), candidate.get('context'),\n candidate.get('value'), candidate.get('success_chance'),\n publish_policy, candidate['codebase'])\n except asyncpg.ForeignKeyViolationError as e:\n if e.constraint_name == 'candidate_codebase_fkey':\n logging.warning(\n 'ignoring candidate %s/%s; codebase unknown',\n codebase, candidate['campaign'])\n unknown_codebases.append(codebase)\n await tr.rollback()\n continue\n elif e.constraint_name == 'candidate_publish_policy_fkey':\n logging.warning('unknown publish policy %s', publish_policy)\n unknown_publish_policies.append(publish_policy)\n await tr.rollback()\n continue\n else:\n raise\n\n # Adjust bucket if there are any open merge proposals with a\n # different command\n\n with span.new_child('sql:existing-runs'):\n existing_runs = await existing_runs_stmt.fetch(\n candidate['codebase'], campaign, command)\n\n if any(existing_runs):\n refresh = True\n if existing_runs[0]['mp_url']:\n bucket = 'update-existing-mp'\n requester = 'command changed for existing mp: {!r} ⇒ {!r}'.format(\n existing_runs[0]['command'], command)\n else:\n bucket = None\n requester = 'command changed: {!r} ⇒ {!r}'.format(\n existing_runs[0]['command'], command)\n else:\n bucket = candidate.get('bucket')\n refresh = False\n requester = \"candidate update\"\n\n if candidate.get('requester'):\n requester += f' {candidate[\"requester\"]}'\n\n with span.new_child('sql:insert-followups'):\n for origin in candidate.get('followup_for', []):\n # argument order matches the (origin, candidate) columns of insert_followup_stmt\n await insert_followup_stmt.execute(origin, candidate_id)\n\n with span.new_child('schedule'):\n # This shouldn't raise CandidateUnavailable, since\n # we just added the candidate\n offset, estimated_duration, queue_id, bucket = await do_schedule_regular(\n conn,\n campaign=campaign,\n change_set=candidate.get('change_set'),\n bucket=bucket,\n requester=requester,\n command=command,\n codebase=candidate['codebase'],\n refresh=refresh)\n except BaseException:\n await tr.rollback()\n raise\n else:\n await tr.commit()\n\n ret.append({\n 'campaign': campaign,\n 'codebase': candidate['codebase'],\n 'bucket': bucket,\n 'change_set': candidate.get('change_set'),\n 'offset': offset,\n 'estimated_duration': estimated_duration.total_seconds()\n if estimated_duration is not None else None,\n 'queue-id': queue_id,\n 'refresh': refresh\n })\n\n return web.json_response({\n 'success': ret,\n 'invalid_command': invalid_command,\n 'invalid_value': invalid_value,\n 'unknown_campaigns': unknown_campaigns,\n 'unknown_codebases': unknown_codebases,\n 
'unknown_publish_policies': unknown_publish_policies})\n\n\n@routes.get(\"/runs/{run_id}\", name=\"get-run\")\nasync def handle_get_run(request):\n run_id = request.match_info['run_id']\n async with request.app['database'].acquire() as conn:\n run = await conn.fetchrow('SELECT * FROM run WHERE id = $1', run_id)\n if run is None:\n raise web.HTTPNotFound(text=f\"no such run: {run_id}\")\n return web.json_response({\n 'codebase': run['codebase'],\n 'campaign': run['suite'],\n 'publish_status': run['publish_status'],\n })\n\n\n@routes.post(\"/runs/{run_id}\", name=\"update-run\")\nasync def handle_update_run(request):\n # TODO(jelmer): Move to publisher?\n run_id = request.match_info['run_id']\n queue_processor = request.app['queue_processor']\n data = await request.json()\n async with request.app['database'].acquire() as conn:\n row = await conn.fetchrow(\n 'UPDATE run SET publish_status = $2 WHERE id = $1 '\n 'RETURNING id, codebase, suite',\n run_id, data['publish_status'])\n if row is None:\n raise web.HTTPNotFound(text=f'no such run: {run_id}')\n ret = {\n 'run_id': run_id,\n 'publish_status': data['publish_status'],\n 'codebase': row[1],\n 'campaign': row[2]\n }\n await queue_processor.redis.publish('publish-status', json.dumps(ret))\n return web.json_response(ret)\n\n\n@routes.get(\"/active-runs\", name=\"get-active-runs\")\nasync def handle_get_active_runs(request):\n queue_processor = request.app['queue_processor']\n return web.json_response((await queue_processor.status_json())[\"processing\"])\n\n\n@routes.get(\"/active-runs/{run_id}\", name=\"get-active-run\")\nasync def handle_get_active_run(request):\n queue_processor = request.app['queue_processor']\n run_id = request.match_info['run_id']\n active_run = await queue_processor.get_run(run_id)\n if not active_run:\n raise web.HTTPNotFound(text='no such run %s' % run_id)\n return web.json_response(active_run.json())\n\n\n@routes.post(\"/active-runs\", name=\"assign\")\nasync def handle_assign(request):\n json = await request.json()\n assignment_count.labels(worker=json.get(\"worker\")).inc()\n span = aiozipkin.request_span(request)\n queue_processor = request.app['queue_processor']\n try:\n assignment = await next_item(\n queue_processor, request.app['config'],\n span, 'assign', worker=json.get(\"worker\"),\n worker_link=json.get(\"worker_link\"),\n backchannel=json.get('backchannel'),\n codebase=json.get('codebase'),\n campaign=json.get('campaign')\n )\n except QueueEmpty:\n return web.json_response({'reason': 'queue empty'}, status=503)\n except QueueRateLimiting as e:\n return web.json_response(\n {'reason': str(e)}, status=429, headers={\n 'Retry-After': str(e.retry_after or DEFAULT_RETRY_AFTER)})\n return web.json_response(\n assignment, status=201, headers={\n 'Location': str(request.app.router['get-active-run'].url_for(\n run_id=assignment['id']))\n })\n\n\nasync def handle_public_assign(request):\n json = await request.json()\n span = aiozipkin.request_span(request)\n with span.new_child('check-worker-creds'):\n worker_name = await check_worker_creds(request.app['database'], request)\n assignment_count.labels(worker=worker_name).inc()\n queue_processor = request.app['queue_processor']\n try:\n assignment = await next_item(\n queue_processor, request.app['config'],\n span, 'assign', worker=worker_name,\n worker_link=json.get(\"worker_link\"),\n backchannel=json.get('backchannel'),\n codebase=json.get('codebase'),\n campaign=json.get('campaign')\n )\n except QueueEmpty:\n return web.json_response({'reason': 'queue empty'}, 
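# --- Editor's illustrative sketch (not part of the original runner.py): a
# worker claiming work from the assign endpoint above. The base URL, worker
# name and backchannel URL are hypothetical; the body fields match what
# handle_assign reads, 503 means the queue is empty, 429 means rate-limited,
# and a 201 response carries the assignment plus a Location header.
import requests  # assumed available

def claim_assignment(base_url):
    resp = requests.post(
        f"{base_url}/active-runs",
        json={"worker": "worker-1",
              "backchannel": {"kind": "http", "url": "http://worker-1:8080"}})
    if resp.status_code in (503, 429):
        return None  # nothing to do right now; honor Retry-After on 429
    resp.raise_for_status()
    return resp.json()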
status=503)\n except QueueRateLimiting as e:\n return web.json_response(\n {'reason': str(e)}, status=429, headers={\n 'Retry-After': str(e.retry_after or DEFAULT_RETRY_AFTER)})\n return web.json_response(\n assignment, status=201, headers={\n 'Location': str(request.app.router['get-active-run'].url_for(\n run_id=assignment['id']))\n })\n\n\n@routes.get(\"/active-runs/+peek\", name=\"peek\")\nasync def handle_peek(request):\n span = aiozipkin.request_span(request)\n queue_processor = request.app['queue_processor']\n try:\n assignment = await next_item(\n queue_processor, request.app['config'], span, 'peek')\n except QueueEmpty:\n return web.json_response({'reason': 'queue empty'}, status=503)\n except QueueRateLimiting as e:\n return web.json_response(\n {'reason': str(e)}, status=429, headers={\n 'Retry-After': str(e.retry_after or DEFAULT_RETRY_AFTER)})\n return web.json_response(\n assignment, status=201, headers={\n 'Location': str(request.app.router['get-active-run'].url_for(\n run_id=assignment['id']))\n })\n\n\n@routes.get(\"/queue\", name=\"queue\")\nasync def handle_queue(request):\n response_obj = []\n queue_processor = request.app['queue_processor']\n if 'limit' in request.query:\n limit = int(request.query['limit'])\n else:\n limit = None\n async with queue_processor.database.acquire() as conn:\n queue = Queue(conn)\n for entry in await queue.iter_queue(limit=limit):\n response_obj.append({\n \"queue_id\": entry.id,\n \"codebase\": entry.codebase,\n \"campaign\": entry.campaign,\n \"context\": entry.context,\n \"command\": entry.command,\n })\n return web.json_response(response_obj)\n\n\nclass QueueEmpty(Exception):\n \"\"\"Queue is empty.\"\"\"\n\n\nclass QueueRateLimiting(Exception):\n \"\"\"Rate limiting encountered while getting queue item.\"\"\"\n\n def __init__(self, retry_after) -> None:\n self.retry_after = retry_after\n\n\nasync def next_item(\n queue_processor, config, span, mode, *, worker=None,\n worker_link: Optional[str] = None,\n backchannel: Optional[dict[str, str]] = None,\n codebase: Optional[str] = None, campaign: Optional[str] = None):\n possible_transports: list[Transport] = []\n possible_forges: list[Forge] = []\n\n async def abort(active_run, code, description):\n result = active_run.create_result(\n branch_url=active_run.main_branch_url,\n vcs_type=active_run.vcs_type,\n code=code,\n description=description\n )\n try:\n await queue_processor.finish_run(active_run, result)\n except RunExists:\n pass\n\n async with queue_processor.database.acquire() as conn:\n item = None\n while item is None:\n with span.new_child('sql:queue-item'):\n item, vcs_info = await queue_processor.next_queue_item(\n conn, codebase=codebase, campaign=campaign)\n if item is None:\n queue_empty_count.inc()\n raise QueueEmpty()\n\n bc: Backchannel\n if backchannel and backchannel['kind'] == 'http':\n bc = PollingBackchannel(my_url=URL(backchannel['url']))\n elif backchannel and backchannel['kind'] == 'jenkins':\n bc = JenkinsBackchannel(my_url=URL(backchannel['url']))\n else:\n bc = Backchannel()\n\n active_run = ActiveRun.from_queue_item(\n backchannel=bc, worker_name=worker, queue_item=item,\n vcs_info=vcs_info, worker_link=worker_link)\n\n try:\n await queue_processor.register_run(active_run)\n except QueueItemAlreadyClaimed as e:\n logging.debug(\n 'Our queue item (%d) is already in progress by %s',\n e.queue_id, e.run_id, extra={'run_id': active_run.log_id})\n item = None\n continue\n\n try:\n campaign_config = get_campaign_config(config, item.campaign)\n except KeyError:\n 
logging.warning(\n 'Unable to find details for campaign %r', item.campaign,\n extra={'run_id': active_run.log_id})\n await abort(active_run, 'unknown-campaign',\n \"Campaign %s unknown\" % item.campaign)\n item = None\n continue\n\n if not campaign_config.default_empty and (\n vcs_info.get(\"branch_url\") is None):\n await abort(active_run, 'not-in-vcs', \"No VCS URL known for codebase.\")\n item = None\n continue\n\n # TODO(jelmer): Handle exceptions from get_builder\n builder = get_builder(\n config, campaign_config,\n queue_processor.apt_archive_url,\n queue_processor.dep_server_url)\n\n if vcs_info.get(\"branch_url\") is not None:\n try:\n with span.new_child('branch:open'):\n probers = select_preferred_probers(vcs_info.get('vcs_type'))\n logging.info(\n 'Opening branch %s with %r', vcs_info['branch_url'],\n [p.__name__ for p in probers])\n main_branch = await to_thread_timeout(\n REMOTE_BRANCH_OPEN_TIMEOUT, open_branch_ext,\n vcs_info['branch_url'],\n possible_transports=possible_transports, probers=probers)\n except BranchRateLimited as e:\n host = urlutils.URL.from_string(vcs_info['branch_url']).host\n logging.warning('Rate limiting for %s: %r', host, e)\n await queue_processor.rate_limited(host, e.retry_after)\n await abort(active_run, 'pull-rate-limited', str(e))\n raise QueueRateLimiting(e.retry_after) from e\n except BranchOpenFailure as e:\n logging.debug(\n 'Error opening branch %s: %s', vcs_info['branch_url'],\n e)\n resume_branch = None\n additional_colocated_branches = None\n vcs_type = vcs_info.get('vcs_type')\n except asyncio.TimeoutError:\n logging.debug('Timeout opening branch %s', vcs_info['branch_url'])\n resume_branch = None\n additional_colocated_branches = None\n vcs_type = vcs_info.get('vcs_type')\n else:\n # We try the public branch first, since perhaps a maintainer\n # has made changes to the branch there.\n active_run.vcs_info[\"branch_url\"] = full_branch_url(main_branch).rstrip('/')\n additional_colocated_branches = await asyncio.to_thread(\n builder.additional_colocated_branches, main_branch)\n vcs_type = get_vcs_abbreviation(main_branch.repository)\n if not item.refresh:\n with span.new_child('resume-branch:open'):\n try:\n resume_branch = await to_thread_timeout(\n REMOTE_BRANCH_OPEN_TIMEOUT,\n open_resume_branch,\n main_branch,\n campaign_config.branch_name,\n item.codebase,\n possible_forges=possible_forges)\n except BranchRateLimited as e:\n host = urlutils.URL.from_string(e.url).host\n logging.warning('Rate limiting for %s: %r', host, e)\n await queue_processor.rate_limited(host, e.retry_after)\n await abort(active_run, 'resume-rate-limited', str(e))\n raise QueueRateLimiting(e.retry_after) from e\n except asyncio.TimeoutError:\n logging.debug('Timeout opening resume branch')\n resume_branch = None\n else:\n resume_branch = None\n else:\n vcs_type = vcs_info.get('vcs_type') or DEFAULT_VCS_TYPE\n resume_branch = None\n additional_colocated_branches = None\n\n if vcs_type is not None:\n vcs_type = vcs_type.lower()\n\n if resume_branch is None and not item.refresh and vcs_type is not None:\n with span.new_child('resume-branch:open'):\n try:\n vcs_manager = queue_processor.public_vcs_managers[vcs_type]\n except KeyError:\n logging.warning(\n 'Unsupported vcs %s for resume branch of %s',\n vcs_type, item.codebase)\n resume_branch = None\n else:\n try:\n resume_branch = await to_thread_timeout(\n VCS_STORE_BRANCH_OPEN_TIMEOUT,\n vcs_manager.get_branch,\n item.codebase, f'{campaign_config.name}/main',\n trace_context=span.context)\n except 
asyncio.TimeoutError:\n logging.warning('Timeout opening resume branch')\n\n if resume_branch is not None:\n with span.new_child('resume-branch:check'):\n resume = await check_resume_result(conn, item.campaign, resume_branch)\n if resume is not None:\n if is_authenticated_url(resume.branch.user_url):\n raise AssertionError(f'invalid resume branch {resume.branch}')\n active_run.resume_from = resume.run_id\n logging.info(\n 'Resuming %s/%s from run %s', item.codebase, item.campaign,\n resume.run_id)\n else:\n # If we can't find the matching run, then there's not much point in\n # resuming.\n # TODO(jelmer): Ideally we'd scan the resume branch for the latest\n # revision that we have a run for and that's not on mainline,\n # but we don't have access to the history here (and the worker doesn't have access\n # to recent runs). Maybe we can provide the worker with a revision => run_id dict\n # and let it determine resume_from?\n pass\n else:\n resume = None\n\n with span.new_child('build-env'):\n build_env = await builder.build_env(conn, campaign_config, item)\n\n with span.new_child('config'):\n build_config = await builder.config(conn, campaign_config, item)\n\n # Refresh the serialized copy of the active run, since we may have changed\n # it. Ideally we'd only do this once, but..\n await queue_processor.update_run(active_run)\n\n try:\n with span.new_child('cache-branch:check'):\n if campaign_config.HasField('debian_build'):\n distribution = get_distribution(\n config,\n campaign_config.debian_build.base_distribution)\n branch_name = cache_branch_name(distribution, \"main\")\n else:\n branch_name = \"main\"\n try:\n vcs_manager = queue_processor.public_vcs_managers[vcs_type]\n except KeyError:\n cached_branch_url = None\n target_repository_url = None\n else:\n cached_branch_url = vcs_manager.get_branch_url(\n item.codebase, branch_name)\n target_repository_url = vcs_manager.get_repository_url(item.codebase)\n except UnsupportedVcs:\n cached_branch_url = None\n target_repository_url = None\n\n env: dict[str, str] = {}\n env.update(build_env)\n if queue_processor.committer:\n env.update(committer_env(queue_processor.committer))\n\n extra_env, command = splitout_env(item.command)\n env.update(extra_env)\n\n assignment = {\n \"id\": active_run.log_id,\n \"description\": f\"{item.campaign} on {item.codebase}\",\n \"queue_id\": item.id,\n \"branch\": {\n \"default-empty\": campaign_config.default_empty,\n \"url\": vcs_info.get('branch_url'),\n \"subpath\": vcs_info.get('subpath'),\n \"vcs_type\": vcs_info.get('vcs_type'),\n \"cached_url\": cached_branch_url,\n \"additional_colocated_branches\": additional_colocated_branches,\n },\n \"resume\": resume.json() if resume else None,\n \"build\": {\n \"target\": builder.kind,\n \"environment\": build_env,\n \"config\": build_config,\n },\n \"command\": command,\n \"codebase\": item.codebase,\n \"codemod\": {\"command\": command, \"environment\": {}},\n \"env\": env,\n \"campaign\": item.campaign,\n \"force-build\": campaign_config.force_build,\n \"skip-setup-validation\": campaign_config.skip_setup_validation,\n \"target_repository\": {\n \"url\": target_repository_url,\n \"vcs_type\": vcs_info.get('vcs_type'),\n }\n }\n\n if mode == 'assign':\n pass\n else:\n await queue_processor.unclaim_run(active_run.log_id)\n return assignment\n\n\n@routes.get(\"/health\", name=\"health\")\nasync def handle_health(request):\n return web.Response(text=\"ok\")\n\n\n@routes.get(\"/ready\", name=\"ready\")\nasync def handle_ready(request):\n return 
web.Response(text=\"ok\")\n\n\nasync def finish(\n active_run: ActiveRun, queue_processor: QueueProcessor,\n request: web.Request) -> tuple[\n list[str], list[str], list[str], JanitorResult]:\n span = aiozipkin.request_span(request)\n worker_name = active_run.worker_name\n resume_from = active_run.resume_from\n\n reader = await request.multipart()\n worker_result = None\n\n filenames = []\n with tempfile.TemporaryDirectory(prefix='janitor-run') as output_directory:\n with span.new_child('read-files'):\n while True:\n part = await reader.next()\n if part is None:\n break\n if isinstance(part, MultipartReader):\n raise web.HTTPBadRequest(text='nested multi-part')\n if part.filename == \"result.json\":\n worker_result = WorkerResult.from_json(await part.json())\n elif part.filename is None:\n raise web.HTTPBadRequest(text=\"Part without filename\")\n else:\n filenames.append(part.filename)\n output_path = os.path.join(output_directory, part.filename)\n with open(output_path, \"wb\") as f:\n try:\n f.write(await part.read())\n except ConnectionResetError as e:\n raise web.HTTPBadRequest(text=str(e)) from e\n\n if worker_result is None:\n raise web.HTTPBadRequest(text=\"Missing result JSON\")\n\n logging.debug('worker result: %r', worker_result)\n\n if worker_name is None:\n worker_name = worker_result.worker_name\n\n with span.new_child('gather-logs'):\n logfiles = list(gather_logs(output_directory))\n\n logfilenames = [entry.name for entry in logfiles]\n\n result = JanitorResult(\n codebase=active_run.codebase,\n campaign=active_run.campaign,\n log_id=active_run.log_id,\n code='success',\n worker_name=worker_name,\n branch_url=worker_result.branch_url,\n vcs_type=worker_result.vcs_type,\n subpath=worker_result.subpath,\n worker_result=worker_result,\n logfilenames=logfilenames,\n resume_from=resume_from,\n change_set=active_run.change_set,\n )\n\n with span.new_child('import-logs'):\n await import_logs(\n logfiles,\n queue_processor.logfile_manager,\n active_run.codebase,\n active_run.log_id,\n mtime=result.finish_time.timestamp(),\n backup_logfile_manager=queue_processor.backup_logfile_manager,\n )\n\n if result.builder_result is not None:\n result.builder_result.from_directory(output_directory)\n\n artifact_names = result.builder_result.artifact_filenames()\n with span.new_child('upload-artifacts-with-backup'):\n try:\n await store_artifacts_with_backup(\n queue_processor.artifact_manager,\n queue_processor.backup_artifact_manager,\n output_directory,\n active_run.log_id,\n artifact_names,\n )\n except BaseException as e:\n result.code = \"artifact-upload-failed\"\n result.description = str(e)\n artifact_upload_failed_count.inc()\n # TODO(jelmer): Mark ourselves as unhealthy?\n artifact_names = None\n else:\n artifact_names = None\n\n with span.new_child('finish-run'):\n await queue_processor.finish_run(active_run, result)\n\n return (filenames, logfilenames, artifact_names, result)\n\n\n@routes.post(\"/active-runs/{run_id}/finish\", name=\"finish\")\nasync def handle_finish(request):\n queue_processor = request.app['queue_processor']\n run_id = request.match_info[\"run_id\"]\n active_run = await queue_processor.get_run(run_id)\n if not active_run:\n return web.json_response({'reason': f'no such run {run_id}'}, status=404)\n try:\n (filenames, logfilenames, artifact_names, result) = await finish(\n active_run, queue_processor, request)\n except RunExists as e:\n # 'result' is never bound when finish() raises, so it cannot be echoed here\n return web.json_response(\n {\"id\": run_id, 'reason': str(e)},\n status=409,\n )\n\n # TODO(jelmer): 
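# --- Editor's illustrative sketch (not part of the original runner.py):
# uploading results to the finish endpoint above. finish() walks the multipart
# body, treating the part named result.json as the WorkerResult and every
# other named part as a log file, which is what this client mimics. The base
# URL is hypothetical.
import json as _json
import aiohttp  # assumed available

async def upload_result(base_url, run_id, result_json: dict, log_paths: list):
    form = aiohttp.FormData()
    form.add_field("result.json", _json.dumps(result_json),
                   filename="result.json", content_type="application/json")
    for path in log_paths:
        # each log is sent as its own part; finish() keys parts by filename
        form.add_field(path, open(path, "rb"), filename=path)
    async with aiohttp.ClientSession() as session:
        async with session.post(
                f"{base_url}/active-runs/{run_id}/finish", data=form) as resp:
            return resp.status, await resp.json()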
Set Location header to something; /runs/{run_id}= ?\n return web.json_response(\n {\"id\": run_id, \"filenames\": filenames,\n \"logs\": logfilenames,\n \"artifacts\": artifact_names, \"result\": result.json()},\n status=201,\n headers={\n 'Location': str(request.app.router['get-run'].url_for(\n run_id=run_id))\n }\n )\n\n\nasync def handle_public_get_active_run(request):\n queue_processor = request.app['queue_processor']\n run_id = request.match_info['run_id']\n active_run = await queue_processor.get_run(run_id)\n if not active_run:\n raise web.HTTPNotFound(text='no such run %s' % run_id)\n return web.json_response(active_run.json())\n\n\nasync def handle_public_finish(request):\n span = aiozipkin.request_span(request)\n queue_processor = request.app['queue_processor']\n run_id = request.match_info[\"run_id\"]\n active_run = await queue_processor.get_run(run_id)\n if not active_run:\n return web.json_response({'reason': f'no such run {run_id}'}, status=404)\n\n with span.new_child('check-worker-creds'):\n await check_worker_creds(request.app['database'], request)\n\n try:\n (filenames, logfilenames, artifact_names, result) = await finish(\n active_run, queue_processor, request)\n except RunExists as e:\n return web.json_response(\n {\"id\": run_id, 'reason': str(e)}, status=409,\n )\n\n return web.json_response(\n {\"id\": run_id, \"filenames\": filenames,\n \"logs\": logfilenames,\n \"artifacts\": artifact_names, \"result\": result.json()},\n status=201,\n )\n\n\nasync def handle_public_root(request):\n return web.Response(text='')\n\n\nasync def create_public_app(queue_processor, config, db, tracer=None):\n app = web.Application(middlewares=[\n state.asyncpg_error_middleware])\n app['config'] = config\n app['database'] = db\n app['queue_processor'] = queue_processor\n app.middlewares.insert(0, metrics_middleware)\n app.router.add_get('/', handle_public_root)\n app.router.add_post('/runner/active-runs', handle_public_assign)\n app.router.add_post(\n '/runner/active-runs/{run_id}/finish', handle_public_finish)\n app.router.add_get(\n '/runner/active-runs/{run_id}',\n handle_public_get_active_run,\n name='get-active-run')\n aiozipkin.setup(app, tracer)\n return app\n\n\nasync def create_app(queue_processor, config, db, tracer=None):\n app = web.Application(middlewares=[\n state.asyncpg_error_middleware])\n app.router.add_routes(routes)\n app['config'] = config\n app['database'] = db\n app['queue_processor'] = queue_processor\n app.middlewares.insert(0, metrics_middleware)\n metrics_route = app.router.add_get(\"/metrics\", metrics, name=\"metrics\")\n aiozipkin.setup(app, tracer, skip_routes=[metrics_route])\n return app\n\n\nasync def main(argv=None):\n import argparse\n\n parser = argparse.ArgumentParser(prog=\"janitor.runner\")\n parser.add_argument(\n \"--listen-address\", type=str, help=\"Listen address\", default=\"localhost\"\n )\n parser.add_argument(\"--port\", type=int, help=\"Listen port\", default=9911)\n parser.add_argument(\"--public-port\", type=int, help=\"Listen port\", default=9919)\n parser.add_argument(\n \"--pre-check\",\n help=\"Command to run to check whether to process codebase.\",\n type=str,\n )\n parser.add_argument(\n \"--post-check\", help=\"Command to run to check codebase before pushing.\", type=str\n )\n parser.add_argument(\n \"--use-cached-only\", action=\"store_true\", help=\"Use cached branches only.\"\n )\n parser.add_argument(\n \"--config\", type=str, default=\"janitor.conf\", help=\"Path to configuration.\"\n )\n parser.add_argument(\n 
\"--backup-directory\",\n type=str,\n default=None,\n help=(\n \"Backup directory to write files to if artifact or log \"\n \"manager is unreachable\"\n ),\n )\n parser.add_argument(\n \"--public-vcs-location\", type=str, default=None,\n help=\"Public vcs location (used for URLs handed to worker)\"\n )\n parser.add_argument(\n \"--public-apt-archive-location\", \n type=str,\n default=None,\n help=\"Base location for our own APT archive\")\n parser.add_argument(\"--public-dep-server-url\", type=str, default=None)\n parser.add_argument(\"--gcp-logging\", action='store_true', help='Use Google cloud logging.')\n parser.add_argument(\"--debug\", action=\"store_true\", help=\"Print debugging info\")\n parser.add_argument(\n \"--run-timeout\", type=int, help=\"Time before marking a run as having timed out (minutes)\",\n default=60)\n parser.add_argument(\n \"--avoid-host\", type=str,\n help=\"Avoid processing runs on a host (e.g. 'salsa.debian.org')\",\n default=[], action='append')\n args = parser.parse_args()\n\n if args.gcp_logging:\n import google.cloud.logging\n client = google.cloud.logging.Client()\n client.get_default_handler()\n client.setup_logging()\n else:\n if args.debug:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n debug.set_debug_flags_from_config()\n\n with open(args.config) as f:\n config = read_config(f)\n\n set_user_agent(config.user_agent)\n\n endpoint = aiozipkin.create_endpoint(\"janitor.runner\", ipv4=args.listen_address, port=args.port)\n if config.zipkin_address:\n tracer = await aiozipkin.create(config.zipkin_address, endpoint, sample_rate=0.1)\n else:\n tracer = await aiozipkin.create_custom(endpoint)\n trace_configs = [aiozipkin.make_trace_config(tracer)]\n\n try:\n public_vcs_managers = get_vcs_managers(\n args.public_vcs_location, trace_configs=trace_configs)\n except UnsupportedProtocol as e:\n parser.error(\n 'Unsupported protocol in --public-vcs-location: %s' % e.path)\n\n logfile_manager = get_log_manager(config.logs_location, trace_configs=trace_configs)\n artifact_manager = get_artifact_manager(config.artifact_location, trace_configs=trace_configs)\n\n loop = asyncio.get_event_loop()\n if args.debug:\n loop.set_debug(True)\n loop.slow_callback_duration = 0.001\n warnings.simplefilter('always', ResourceWarning)\n\n async with AsyncExitStack() as stack:\n await stack.enter_async_context(artifact_manager)\n await stack.enter_async_context(logfile_manager)\n if args.backup_directory:\n backup_logfile_directory = os.path.join(args.backup_directory, \"logs\")\n backup_artifact_directory = os.path.join(args.backup_directory, \"artifacts\")\n if not os.path.isdir(backup_logfile_directory):\n os.mkdir(backup_logfile_directory)\n if not os.path.isdir(backup_artifact_directory):\n os.mkdir(backup_artifact_directory)\n backup_artifact_manager = LocalArtifactManager(backup_artifact_directory)\n await stack.enter_async_context(backup_artifact_manager)\n backup_logfile_manager = FileSystemLogFileManager(backup_logfile_directory)\n await stack.enter_async_context(backup_logfile_manager)\n loop.create_task(\n upload_backup_artifacts(\n backup_artifact_manager, artifact_manager, timeout=60 * 15\n )\n )\n else:\n backup_artifact_manager = None\n backup_logfile_manager = None\n db = await state.create_pool(config.database_location)\n redis = Redis.from_url(config.redis_location)\n stack.push_async_callback(redis.close)\n queue_processor = QueueProcessor(\n db, redis,\n run_timeout=args.run_timeout,\n 
logfile_manager=logfile_manager,\n artifact_manager=artifact_manager,\n public_vcs_managers=public_vcs_managers,\n use_cached_only=args.use_cached_only,\n committer=config.committer,\n backup_artifact_manager=backup_artifact_manager,\n backup_logfile_manager=backup_logfile_manager,\n avoid_hosts=set(args.avoid_host),\n dep_server_url=args.public_dep_server_url,\n apt_archive_url=args.public_apt_archive_location,\n )\n\n queue_processor.start_watchdog()\n\n if args.public_port:\n public_app = await create_public_app(\n queue_processor, config, db, tracer=tracer)\n public_runner = web.AppRunner(public_app)\n await public_runner.setup()\n public_site = web.TCPSite(\n public_runner, args.listen_address, port=args.public_port)\n await public_site.start()\n\n app = await create_app(queue_processor, config, db, tracer=tracer)\n runner = web.AppRunner(app)\n await runner.setup()\n site = web.TCPSite(runner, args.listen_address, port=args.port)\n await site.start()\n while True:\n await asyncio.sleep(3600)\n\n\nif __name__ == \"__main__\":\n asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n sys.exit(asyncio.run(main(sys.argv)))\n","repo_name":"jelmer/janitor","sub_path":"janitor/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":110169,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"5"} +{"seq_id":"73962649112","text":"# input\nN = int(input())\n\nA = set()\n\n# 何bit目が1であるか定義しておく\nfor i in range(60) :\n if N & (1< (str, str):\n \"\"\"\n Choose a random animal and find its definition in WordNet.\n\n Returns\n -------\n (str, str)\n random_animal : str\n A random animal.\n definition : str\n Definition of a random animal.\n \"\"\"\n random_animal = random.choice(self.animals_list)\n synsets = wn.synsets(str(random_animal))\n definition = \"\"\n while True:\n if len(synsets) != 0:\n for synset in synsets:\n if synset.lexname() == 'noun.animal':\n definition = synset.definition()\n break\n else:\n random_animal = random.choice(self.animals_list)\n synsets = wn.synsets(str(random_animal))\n return random_animal, definition\n\n def riddle(self):\n \"\"\"Execute the riddle.\"\"\"\n animal, definition = self.random_data()\n print(\"\\n\", textwrap.fill(definition), \"\\n\")\n chances = 3\n while True:\n if input(\"The answer is: \").lower() in [animal, \"###\"]:\n self.book = True\n break\n elif chances == 1:\n print(\"\\nThe right answer is: \", str(animal), \"\\n\")\n animal, definition = self.random_data()\n print(\"\\n\", textwrap.fill(definition), \"\\n\")\n chances = 3\n else:\n chances -= 1\n","repo_name":"Darina23/Textbasiertes-Spiel-PRS-Projekt-","sub_path":"src/SecondRiddle.py","file_name":"SecondRiddle.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"33119867727","text":"import logging\nimport os\nimport signal\nimport sys\n\nimport dockercloud\n\nfrom config import DEBUG, PID_FILE, WORKERLISTGEN_SERVICE_URI, WORKERLISTGEN_CONTAINER_URI, API_AUTH\nfrom eventhandler import on_cloud_event, on_websocket_open, on_user_reload, on_websocket_close\nfrom workerlistgen import __version__\nfrom workerlistgencfg import run_workerlistgen\nfrom utils import save_to_file\n\ndockercloud.user_agent = \"dockercloud-workerlistgen/%s\" % __version__\n\nlogger = logging.getLogger(\"workerlistgen\")\n\n\ndef create_pid_file():\n pid = str(os.getpid())\n save_to_file(PID_FILE, pid)\n return pid\n\n\ndef 
set_autoreload(workerlistgen_container_uri, workerlistgen_service_uri, api_auth):\n autoreload = False\n if workerlistgen_container_uri and workerlistgen_service_uri:\n if api_auth:\n msg = \"dockercloud/workerlistgen %s has access to the cloud API - will reload list of backends\" \\\n \" in real-time\" % __version__\n autoreload = True\n else:\n msg = \"dockercloud/workerlistgen %s doesn't have access to the cloud API - you might want to\" \\\n \" give an API role to this service for automatic backend reconfiguration\" % __version__\n else:\n msg = \"dockercloud/workerlistgen %s is not running in Docker Cloud\" % __version__\n\n if autoreload:\n logger.info(msg)\n else:\n raise RuntimeError(msg)\n\n return autoreload\n\n\ndef listen_remote_events():\n events = dockercloud.Events()\n events.on_open(on_websocket_open)\n events.on_close(on_websocket_close)\n events.on_message(on_cloud_event)\n events.run_forever()\n\n\ndef main():\n logging.basicConfig(stream=sys.stdout)\n logging.getLogger(\"workerlistgen\").setLevel(logging.DEBUG if DEBUG else logging.INFO)\n\n signal.signal(signal.SIGUSR1, on_user_reload)\n signal.signal(signal.SIGTERM, sys.exit)\n\n autoreload = set_autoreload(WORKERLISTGEN_CONTAINER_URI, WORKERLISTGEN_SERVICE_URI, API_AUTH)\n\n pid = create_pid_file()\n logger.info(\"workerlistgen PID: %s\" % pid)\n\n if autoreload:\n listen_remote_events()\n else:\n raise RuntimeError('Must run in autoreload mode')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"citusdata/dockercloud-workerlist-gen","sub_path":"workerlistgen/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"5"} +{"seq_id":"11701396391","text":"from __future__ import annotations\n\nfrom typing import NoReturn\n\nimport numpy as np\nimport pandas as pd\nimport plotly.io\nfrom sklearn import datasets\n\nfrom IMLearn import BaseEstimator\nfrom IMLearn.metrics import mean_square_error\nfrom IMLearn.utils import split_train_test\nfrom IMLearn.model_selection import cross_validate\nfrom IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression\nfrom sklearn.linear_model import Lasso\n\nfrom utils import *\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nplotly.io.renderers.default = 'browser'\n\n\nclass LassoDummy(BaseEstimator):\n def __init__(self, lam: float):\n super().__init__()\n self._lam = lam\n\n def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:\n self.lasso = Lasso(self._lam, max_iter=2000)\n self.lasso.fit(X, y)\n\n def _predict(self, X: np.ndarray) -> np.ndarray:\n return self.lasso.predict(X)\n\n def _loss(self, X: np.ndarray, y: np.ndarray) -> float:\n return mean_square_error(self.lasso.predict(X), y)\n\n\ndef select_polynomial_degree(n_samples: int = 100, noise: float = 5):\n \"\"\"\n Simulate data from a polynomial model and use cross-validation to select the best fitting degree\n\n Parameters\n ----------\n n_samples: int, default=100\n Number of samples to generate\n\n noise: float, default = 5\n Noise level to simulate in responses\n \"\"\"\n\n # Question 1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for eps Gaussian noise\n # and split into training- and testing portions\n def f(x: int):\n return (x + 3) * (x + 2) * (x + 1) * (x - 1) * (x - 2)\n\n X = np.linspace(-1.2, 2, n_samples)\n y_noiseless = list(map(f, X))\n y = y_noiseless + np.random.normal(0, noise, n_samples)\n train_X, train_y, test_X, 
test_y = split_train_test(pd.DataFrame(X),\n pd.Series(y), train_proportion=2 / 3)\n\n train_X, train_y, test_X, test_y = train_X.sort_index().iloc[:, 0].to_numpy(), \\\n train_y.sort_index().to_numpy(), \\\n test_X.sort_index().iloc[:, 0].to_numpy(), \\\n test_y.sort_index().to_numpy()\n\n # plot noiseless vs noised data\n go.Figure(\n data=[go.Scatter(x=X, y=y_noiseless, name='true noiseless', mode='markers'),\n go.Scatter(x=train_X, y=train_y, name='train with noise', mode='markers'),\n go.Scatter(x=test_X, y=test_y, name='test with noise', mode='markers')],\n layout=go.Layout(\n title=fr'$\\text{{True vs Test and Train sets with normal noise }} \\sigma^2 = {noise}$',\n xaxis_title='x',\n yaxis_title='y',\n height=1000,\n width=1000)\n ).show()\n\n # Question 2 - Perform CV for polynomial fitting with degrees 0,1,...,10\n train_scores = np.zeros(11)\n validation_scores = np.zeros(11)\n for k in range(11):\n train_scores[k], validation_scores[k] = cross_validate(PolynomialFitting(k),\n train_X,\n train_y,\n mean_square_error)\n go.Figure(\n data=[\n go.Scatter(\n x=list(range(11)),\n y=validation_scores,\n name='Validation',\n mode='markers'\n ),\n go.Scatter(\n x=list(range(11)),\n y=train_scores,\n name='Train',\n mode='markers'\n )\n ],\n layout=go.Layout(\n title=fr'$\\text{{Validation and Train Scores for data with normal noise }} \\sigma^2 = {noise}$',\n xaxis_title='Polynomial Degree',\n yaxis_title='Loss',\n xaxis=dict(dtick=1),\n height=1000,\n width=1000)\n ).show()\n\n # Question 3 - Using best value of k, fit a k-degree polynomial model and report test error\n k = int(np.argmin(validation_scores))\n polyfit = PolynomialFitting(k)\n pred_y = polyfit.fit(train_X, train_y).predict(test_X)\n test_error = mean_square_error(test_y, pred_y)\n # print()\n print(f\"k^{k}, test error = {round(test_error, 2)} for {n_samples} samples with noise {noise}\")\n\n # go.Figure(\n # data=[go.Scatter(x=X, y=y_noiseless, name='true noiseless', mode='markers'),\n # go.Scatter(x=train_X, y=polyfit.predict(train_X), name='train prediction', mode='markers'),\n # go.Scatter(x=test_X, y=pred_y, name='test prediction', mode='markers')],\n # layout=go.Layout(\n # title=fr'$\\text{{True vs Test and Train sets predictions for data with normal noise }} \\sigma^2 = {noise}$',\n # xaxis_title='x',\n # yaxis_title='y',\n # height=1000,\n # width=1000)\n # ).show()\n\n\ndef select_regularization_parameter(n_samples: int = 50, n_evaluations: int = 500):\n \"\"\"\n Using sklearn's diabetes dataset use cross-validation to select the best fitting regularization parameter\n values for Ridge and Lasso regressions\n\n Parameters\n ----------\n n_samples: int, default=50\n Number of samples to generate\n\n n_evaluations: int, default = 500\n Number of regularization parameter values to evaluate for each of the algorithms\n \"\"\"\n # Question 6 - Load diabetes dataset and split into training and testing portions\n X, y = datasets.load_diabetes(return_X_y=True)\n train_X, train_y, test_X, test_y = X[:n_samples], y[:n_samples], \\\n X[n_samples:], y[n_samples:]\n\n # Question 7 - Perform CV for different values of the regularization parameter for Ridge and Lasso regressions\n lambda_min = 0.001\n lambda_max = 3\n lambda_range = np.linspace(lambda_min, lambda_max, n_evaluations)\n\n train_er_ind = 0\n val_er_ind = 1\n\n ridge_scores = np.zeros(shape=(n_evaluations, 2))\n lasso_scores = np.zeros(shape=(n_evaluations, 2))\n\n for i, lam in enumerate(lambda_range):\n ridge_scores[i] = cross_validate(RidgeRegression(lam), 
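# --- Editor's illustrative sketch (not part of the original file): the
# cross_validate() used above takes (estimator, X, y, scorer) and returns
# (train_score, validation_score). IMLearn's own implementation is not shown
# here; a plausible 5-fold version of the same idea:
import numpy as np

def cross_validate_sketch(estimator, X, y, scorer, cv=5):
    folds = np.array_split(np.arange(len(y)), cv)
    train_scores, val_scores = [], []
    for fold in folds:
        mask = np.ones(len(y), dtype=bool)
        mask[fold] = False  # hold this fold out for validation
        est = estimator.fit(X[mask], y[mask])  # fit() returns self, as used above
        train_scores.append(scorer(y[mask], est.predict(X[mask])))
        val_scores.append(scorer(y[fold], est.predict(X[fold])))
    return float(np.mean(train_scores)), float(np.mean(val_scores))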
train_X, train_y, mean_square_error)\n lasso_scores[i] = cross_validate(LassoDummy(lam), train_X, train_y, mean_square_error)\n\n scores = [ridge_scores, lasso_scores]\n titles = [\"Ridge\", \"Lasso\"]\n fig = make_subplots(rows=1, cols=2,\n column_titles=[\n fr'$\\text{{Validation and Train Scores for {titles[i]} model }}$'\n for i in range(2)])\n for i in range(2):\n fig.add_traces(\n data=[\n go.Scatter(\n x=lambda_range,\n y=scores[i][:, val_er_ind],\n name=f'{titles[i]} Validation',\n mode='markers'\n ),\n go.Scatter(\n x=lambda_range,\n y=scores[i][:, train_er_ind],\n name=f'{titles[i]} Train',\n mode='markers'\n )\n ],\n rows=1,\n cols=i % 2 + 1\n )\n\n fig.update_layout(xaxis_title=r\"$\\lambda$\",\n yaxis_title=r\"$\\text{Loss}$\", )\n\n fig.show()\n\n best_lasso = lambda_range[np.argmin(lasso_scores[:, val_er_ind])]\n\n best_ridge = lambda_range[np.argmin(ridge_scores[:, val_er_ind])]\n\n print(\n f\"Lasso best lambda = {best_lasso}\\n\" +\n f\"Ridge best lambda = {best_ridge}\"\n )\n\n # Question 8 - Compare best Ridge model, best Lasso model and Least Squares model\n\n lasso = LassoDummy(lam=best_lasso).fit(train_X, train_y)\n ridge = RidgeRegression(lam=best_ridge).fit(train_X, train_y)\n lin = LinearRegression().fit(train_X, train_y)\n print(\n f\"Lasso Loss over Test: {lasso.loss(test_X, test_y)}\\n\" +\n f\"Ridge Loss over Test {ridge.loss(test_X, test_y)}\\n\"+\n f\"Least Squares Loss over Test {lin.loss(test_X, test_y)}\"\n )\n\n\nif __name__ == '__main__':\n np.random.seed(0)\n select_polynomial_degree()\n select_polynomial_degree(noise=0)\n select_polynomial_degree(n_samples=1500, noise=10)\n select_regularization_parameter()\n","repo_name":"OmriBenbenisty/IML.HUJI","sub_path":"exercises/perform_model_selection.py","file_name":"perform_model_selection.py","file_ext":"py","file_size_in_byte":8316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"5"} +{"seq_id":"10479877971","text":"import mysql.connector\r\nfrom mysql.connector import errorcode\r\ntry:\r\n db_conexão = mysql.connector.connect(host='localhost', user='root', password='', database='clientes')\r\n cursor = db_conexão.cursor()\r\n sql = 'CREATE TABLE IF NOT EXISTS pessoa(ID int not null auto_increment unique, Nome varchar(50), CPF varchar(15) unique, Telefone varchar(20), Conta_corrente varchar(15) unique, Agência varchar(10), Banco varchar(30),Cidade varchar(30), Sigla varchar (2), Nacionalidade varchar(20), Empresa int, Função varchar(20));'\r\n cursor.execute(sql)\r\n sql = 'INSERT INTO pessoa(Nome, CPF, Telefone, Conta_corrente, Agência, Banco, Cidade, Sigla, Nacionalidade, Empresa, Função) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'\r\n values = (str(input('Digite seu nome: ')),str(input('Digite o seu CPF: ')),str(input('Digite seu telefone: ')),str(input('Digite o número da conta corrente: ')),str(input('Digite a agência: ')),str(input('Digite o banco: ')),str(input('Digite a cidade: ')),str(input('Digite a sigla do estado: ')),str(input('Digite sua nacionalidade: ')),str(input('Digite o ID da empresa: ')), str(input('Digite sua função na empresa: ')))\r\n cursor.execute(sql, values)\r\n cursor.close()\r\n db_conexão.commit()\r\n db_conexão.close()\r\nexcept mysql.connector.Error as error:\r\n if error.errno == errorcode.ER_BAD_DB_ERROR:\r\n print('O banco de dados não existe!!')\r\n elif error.errno == errorcode.ER_ACCESS_DENIED_ERROR:\r\n print('Usuáro ou senha inválidos!!')\r\n else:\r\n print(error)\r\nelse:\r\n 
db_conexão.close()\r\n","repo_name":"Rafaelcoder18/Python-SQL","sub_path":"Cria a tabela e insere dados.py","file_name":"Cria a tabela e insere dados.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"10008872608","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# Define the function for computing the Mandelbrot set\ndef mandelbrot(c, max_iterations):\n z = 0\n n = 0\n while abs(z) <= 2 and n < max_iterations:\n z = z**2 + c\n n += 1\n if n == max_iterations:\n return 0\n else:\n return n\n\n# Set the limits of the plot\nxmin, xmax = -0.75, -0.745\nymin, ymax = 0.1, 0.105\n\n# Set the resolution of the plot\nresolution = 10000\n\n# Create the x and y coordinates\nx = np.linspace(xmin, xmax, resolution)\ny = np.linspace(ymin, ymax, resolution)\n\n# Create a grid of the x and y coordinates\nX, Y = np.meshgrid(x, y)\n\n# Compute the Mandelbrot set\nM = np.zeros((resolution, resolution))\nfor i in range(resolution):\n for j in range(resolution):\n c = X[i, j] + Y[i, j]*1j\n M[i, j] = mandelbrot(c, 1000)\n\n# Plot the Mandelbrot set\nplt.figure(figsize=(20,20))\nplt.imshow(M.T, extent=[xmin, xmax, ymin, ymax], cmap='jet', origin='lower')\nplt.axis('off')\n# save before show: calling savefig() after show() writes an empty figure once the window is closed\nplt.savefig('mandelbrot_high_res.png', dpi=300)\nplt.show()\n","repo_name":"Babayaga7777/mandelbrot_test","sub_path":"mandelbrot_set image simulation.py","file_name":"mandelbrot_set image simulation.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"70600388311","text":"#!/usr/local/bin/python3\n\nimport unittest\nfrom PublishDNS import run_os_command\nfrom PublishDNS import poll_for_resolve\nfrom PublishDNS import poll_for_cname_update\n\n\nclass SimpleTest(unittest.TestCase):\n\n def test_run_os_command(self):\n self.assertEqual(run_os_command(\"echo true\"), \"true\\n\")\n self.assertEqual(run_os_command(\"/sad-notthere-6d0b115\"), -1)\n\n # Happy, lookup on www.google.com\n def test_poll_for_resolve(self):\n self.assertEqual(poll_for_resolve(\"www.google.com\", 1), 0)\n self.assertEqual(poll_for_resolve(\"www.doesntexit.zoo\", 1), -1)\n\n # Happy, lookup on www.microsoft.com\n # -> obviously if/when MS update their DNS...\n def test_poll_for_cname_update(self):\n self.assertEqual(poll_for_cname_update(\"www.microsoft.com\", \"www.microsoft.com-c-3.edgekey.net\", 1), 0)\n self.assertEqual(poll_for_cname_update(\"www.microsoft.com\", \"notthis\", 1), -1)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"bluebenno/AWS-things","sub_path":"test_PublishDNS.py","file_name":"test_PublishDNS.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"36098050015","text":"#coding=utf-8\n#!/usr/bin/python\nimport sys\nsys.path.append('..') \nfrom base.spider import Spider\n\nclass Spider(Spider):\n\tdef getDependence(self):\n\t\treturn ['py_ali']\n\tdef getName(self):\n\t\treturn "py_zhaozy"\n\tdef init(self,extend):\n\t\tself.ali = extend[0]\n\t\tprint(\"============py_zhaozy============\")\n\t\tpass\n\tdef isVideoFormat(self,url):\n\t\tpass\n\tdef manualVideoCheck(self):\n\t\tpass\n\tdef homeContent(self,filter):\n\t\tresult = {}\n\t\treturn result\n\tdef homeVideoContent(self):\n\t\tresult = {}\n\t\treturn result\n\tdef categoryContent(self,tid,pg,filter,extend):\n\t\tresult = {}\n\t\treturn result\n\theader = 
{\n\t\t\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.54 Safari/537.36\",\n\t\t\"Referer\": \"https://zhaoziyuan.me/\"\n\t}\n\tdef detailContent(self,array):\n\t\ttid = array[0]\n\t\tprint(self.getName())\n\t\tpattern = '(https://www.aliyundrive.com/s/[^\\\"]+)'\n\t\turl = self.regStr(tid,pattern)\n\t\tif len(url) > 0:\n\t\t\treturn self.ali.detailContent(array)\n\n\t\trsp = self.fetch('https://zhaoziyuan.me/'+tid)\n\t\turl = self.regStr(rsp.text,pattern)\n\t\tif len(url) == 0:\n\t\t\treturn \"\"\n\t\tnewArray = [url]\n\t\tprint(newArray)\n\t\treturn self.ali.detailContent(newArray)\n\n\tdef searchContent(self,key,quick):\n\t\tmap = {\n\t\t\t'7':'文件夹',\n\t\t\t'1':'视频'\n\t\t}\n\t\tja = []\n\t\tfor tKey in map.keys():\n\t\t\turl = \"https://zhaoziyuan.me/so?filename={0}&t={1}\".format(key,tKey)\n\t\t\trsp = self.fetch(url,headers=self.header)\n\t\t\troot = self.html(self.cleanText(rsp.text))\n\t\t\taList = root.xpath(\"//li[@class='clear']//a\")\n\t\t\tfor a in aList:\n\t\t\t\t# title = a.xpath('./h3/text()')[0] + a.xpath('./p/text()')[0]\n\t\t\t\ttitle = self.xpText(a,'./h3/text()') + self.xpText(a,'./p/text()')\n\t\t\t\tpic = 'https://img0.baidu.com/it/u=603086994,1727626977&fm=253&fmt=auto?w=500&h=667'\n\t\t\t\tjo = {\n\t\t\t\t\t'vod_id': self.xpText(a,'@href'),\n\t\t\t\t\t'vod_name': '[{0}]{1}'.format(key,title),\n\t\t\t\t\t'vod_pic': pic\n\t\t\t\t}\n\t\t\t\tja.append(jo)\n\t\tresult = {\n\t\t\t'list':ja\n\t\t}\n\t\treturn result\n\n\tdef playerContent(self,flag,id,vipFlags):\n\t\treturn self.ali.playerContent(flag,id,vipFlags)\n\n\tconfig = {\n\t\t\"player\": {},\n\t\t\"filter\": {}\n\t}\n\theader = {}\n\n\tdef localProxy(self,param):\n\t\treturn [200, \"video/MP2T\", action, \"\"]","repo_name":"Dong-learn9/TVBox-zyjk","sub_path":"plugin/py_zhaozy.py","file_name":"py_zhaozy.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","stars":578,"dataset":"github-code","pt":"5"} +{"seq_id":"8467082964","text":"import logging\nimport logging.handlers\nimport os\nimport sys\n\n\nclass Logger(object):\n \"\"\"The logger factory class. 
It is a template to help quickly create a log utility.\n Attributes:\n set_conf(log_file, use_stdout, log_level): this is a static method that returns a configured logger.\n get_logger(tag): this is a static method that returns a configured logger.\n \"\"\"\n __loggers = {}\n\n __use_stdout = True\n __log_file = \"\"\n __log_level = logging.DEBUG\n\n @staticmethod\n def config(log_file, use_stdout, log_level):\n \"\"\"set the config, where config is a ConfigParser object\n \"\"\"\n Logger.__use_stdout = use_stdout\n Logger.__log_level = log_level\n dirname = os.path.dirname(log_file)\n if (not os.path.isfile(log_file)) and (not os.path.isdir(dirname)):\n try:\n os.makedirs(dirname)\n except OSError as e:\n print(\"create path '%s' for logging failed: %s\" % (dirname, e))\n sys.exit()\n Logger.__log_file = log_file\n\n @staticmethod\n def get_logger(tag):\n \"\"\"return the configured logger object\n \"\"\"\n if tag not in Logger.__loggers:\n Logger.__loggers[tag] = logging.getLogger(tag)\n Logger.__loggers[tag].setLevel(Logger.__log_level)\n formatter = logging.Formatter(\n \"[%(name)s][%(levelname)s] %(asctime)s \"\n \"%(filename)s:%(lineno)s %(message)s\")\n file_handler = logging.handlers.TimedRotatingFileHandler(\n Logger.__log_file, when='H', interval=1, backupCount=0)\n file_handler.setLevel(Logger.__log_level)\n file_handler.setFormatter(formatter)\n file_handler.suffix = \"%Y%m%d%H%M.log\"\n Logger.__loggers[tag].addHandler(file_handler)\n if Logger.__use_stdout:\n stream_headler = logging.StreamHandler()\n stream_headler.setLevel(Logger.__log_level)\n stream_headler.setFormatter(formatter)\n Logger.__loggers[tag].addHandler(stream_headler)\n return Logger.__loggers[tag]\n","repo_name":"ApolloAuto/apollo","sub_path":"modules/tools/common/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":23653,"dataset":"github-code","pt":"5"} +{"seq_id":"38457268694","text":"\"\"\"\nA wrapper for lavavu\n\n\"\"\"\n\nimport logging\nfrom LoopStructural.utils import getLogger\nfrom LoopStructural.utils import LoopImportError\nlogger = getLogger(__name__)\n\ntry:\n import lavavu\n from lavavu.vutils import is_notebook\n#catch the import lavavu error and provide more information\nexcept ImportError:\n raise LoopImportError('lavavu',additional_information=\"Please install lavavu: pip install lavavu\")\nimport numpy as np\ntry:\n from skimage.measure import marching_cubes\nexcept ImportError:\n logger.warning(\"Using depreciated version of scikit-image\")\n from skimage.measure import marching_cubes_lewiner as marching_cubes\nfrom LoopStructural.modelling.features import GeologicalFeature\nfrom LoopStructural.utils.helper import create_surface, get_vectors, create_box\n\n# adapted/copied from pyvista for sphinx scraper\n_OPEN_VIEWERS = {}\n\ndef close_all():\n _OPEN_VIEWERS.clear()\n return True\n # for key, v in _OPEN_VIEWERS.items():\n # if not v._closed:\n # v.close()\n # v.deep_clean()\n # _OPEN_VIEWERS.clear()\n # return True\n##\nclass LavaVuModelViewer:\n def __init__(self, model=None, bounding_box=None, nsteps=None, vertical_exaggeration=1., **kwargs):\n \"\"\"\n A wrapper to plot LoopStructural object with lavavu\n\n Parameters\n ----------\n **kwargs : lavavu viewer kwargs\n\n\n objects : dictionary of objects that have been plotted\n \"\"\"\n # copied from pyvista\n if lavavu is None:\n logger.error(\"Lavavu isn't installed: pip install lavavu\")\n return\n self._id_name = 
\"{}-{}\".format(str(hex(id(self))), len(_OPEN_VIEWERS))\n _OPEN_VIEWERS[self._id_name] = self\n #\n self.lv = lavavu.Viewer(**kwargs)\n self.lv['orthographic'] = True\n self.lv.modelscale([1,1,vertical_exaggeration])\n self.objects = {}\n self.bounding_box = bounding_box\n self.nsteps = nsteps\n if model is not None:\n self.bounding_box = model.bounding_box\n self.nsteps = model.nsteps\n logger.debug(\"Using bounding box from model\")\n if self.bounding_box is None or self.nsteps is None:\n logger.error(\"Plot area has not been defined.\")\n self.bounding_box = np.array(self.bounding_box)\n self._nsteps = np.array(self.nsteps)\n self._model = model\n # prerotate to a nice view\n # self.lv.rotate([-57.657936096191406, -13.939384460449219, -6.758780479431152])\n \n def close(self):\n pass\n \n @property\n def model(self):\n return self._model\n \n @model.setter\n def model(self, model):\n if model is not None:\n self.bounding_box = np.array(model.bounding_box)\n self.nsteps = np.array(model.nsteps)\n self._model = model\n self._nelements = self.nsteps[0]*self.nsteps[1]*self.nsteps[2]\n logger.debug(\"Using bounding box from model\")\n @property\n def nelements(self):\n \"\"\"The number of elements to use for evaluating the isosurface\n\n Returns\n -------\n nelements : int\n number of elements to use for isosurfacing\n \"\"\"\n return self._nelements\n \n @nelements.setter\n def nelements(self, nelements : int):\n \"\"\"Setter for nelements, automatically caculates the number of equally sized elements\n to isosurface. Better than specifying step distance manually\n\n Parameters\n ----------\n nelements : int\n [description]\n \"\"\" \n box_vol = (self.bounding_box[1, 0]-self.bounding_box[0, 0]) * (self.bounding_box[1, 1]-self.bounding_box[0, 1]) * (self.bounding_box[1, 2]-self.bounding_box[0, 2])\n ele_vol = box_vol / nelements\n # calculate the step vector of a regular cube\n step_vector = np.zeros(3)\n step_vector[:] = ele_vol ** (1. 
/ 3.)\n # step_vector /= np.array([1,1,2])\n # number of steps is the length of the box / step vector\n nsteps = np.ceil((self.bounding_box[1, :] - self.bounding_box[0, :]) / step_vector).astype(int)\n self.nsteps = nsteps\n logger.info(\"Using grid with dimensions {} {} {}\".format(nsteps[0],nsteps[1],nsteps[2]))\n\n @property\n def nsteps(self):\n return self._nsteps\n\n @nsteps.setter\n def nsteps(self,nsteps):\n self._nsteps = np.array(nsteps)\n \n def deep_clean(self):\n \"\"\"[summary]\n\n [extended_summary]\n \"\"\"\n self.lv.clear()\n self.lv.cleardata()\n pass\n \n def add_section(self, geological_feature=None, axis='x', value=None, **kwargs):\n \"\"\"\n\n Plot a section/map thru the model and paint with a geological feature\n\n Parameters\n ----------\n geological_feature : Geological feature\n The feature to paint the section with\n axis : string\n which axis, x,y,z\n value : float\n Where to make the section\n kwargs\n additional kwargs passes to lavavu for colourmaps etc\n\n Returns\n -------\n\n \"\"\"\n if axis == 'x':\n tri, yy, zz = create_surface(self.bounding_box[:, [1, 2]], self.nsteps[[1, 2]])\n xx = np.zeros(zz.shape)\n if value is None:\n value = np.nanmean(self.bounding_box[:, 0])\n xx[:] = value\n if axis == 'y':\n tri, xx, zz = create_surface(self.bounding_box[:, [0, 2]], self.nsteps[[0, 2]])\n yy = np.zeros(xx.shape)\n if value is None:\n value = np.nanmean(self.bounding_box[:, 1])\n yy[:] = value\n if axis == 'z':\n tri, xx, yy = create_surface(self.bounding_box[:, 0:2], self.nsteps[0:2])\n zz = np.zeros(xx.shape)\n if value is None:\n value = np.nanmean(self.bounding_box[:, 2])\n zz[:] = value\n if geological_feature == 'model' and self.model is not None:\n name = kwargs.get('name','model_section')\n else:\n name = kwargs.get('name', geological_feature.name)\n name = '{}_section_at_{}_of_{}'.format(axis,value,name)\n colour = kwargs.get('colour', 'red')\n\n # create an array to evaluate the feature on for the section\n points = np.zeros((len(xx), 3)) #\n points[:, 0] = xx\n points[:, 1] = yy\n points[:, 2] = zz\n\n surf = self.lv.triangles(name)\n surf.vertices(self.model.rescale(points,inplace=False))\n surf.indices(tri)\n logger.info(\"Adding %s section at %f\" % (axis, value))\n if geological_feature is None:\n surf.colours(colour)\n\n if geological_feature is not None and type(geological_feature) != str:\n if 'norm' in kwargs:\n surf.values(np.linalg.norm(\n geological_feature.evaluate_gradient(points), axis=1),\n geological_feature.name)\n else:\n surf.values(geological_feature.evaluate_value(points),\n geological_feature.name)\n surf[\"colourby\"] = geological_feature.name\n cmap = lavavu.cubehelix(100)\n if 'cmap' in kwargs:\n cmap = kwargs['cmap']\n logger.info(\"Colouring section with %s min: %f, max: %f\" % (\n geological_feature.name, geological_feature.min(), geological_feature.max()))\n surf.colourmap(cmap, range=[geological_feature.min(), geological_feature.max()])\n if geological_feature == 'model' and self.model is not None:\n name = kwargs.get('name','model_section')\n v = self.model.evaluate_model(points,scale=False)\n surf.values(v,\n name)\n surf[\"colourby\"] = name\n cmap = kwargs.get('cmap',lavavu.cubehelix(100))\n surf.colourmap(cmap)\n \n\n\n def add_isosurface(self, \n geological_feature, \n value = None, \n isovalue=None,\n paint_with=None, \n slices=None, \n colour='red', \n nslices=None, \n cmap=None, \n filename=None, \n names=None, \n colours=None, \n opacity=None,\n function=None,\n **kwargs):\n \"\"\" Plot the surface of a 
geological feature \n\n [extended_summary]\n\n Parameters\n ----------\n geological_feature : GeologicalFeature\n [description]\n value : float, optional\n\n isovalue : [type], optional\n [description], by default None\n paint_with : [type], optional\n [description], by default None\n slices : [type], optional\n [description], by default None\n colour : [type], optional\n [description], by default None\n nslices : [type], optional\n [description], by default None\n cmap : [type], optional\n [description], by default None\n filename: string, optional\n filename for exporting\n names: list, optional\n list of names same length as slices\n colours: list, optional\n list of colours same length as slices\n opacity: double, optional\n change the opacity of the surface(s)\n callback_function: \n called with verts, tri and surface name - e.g.\n callback_function(verts,tri,name)\n\n Returns\n -------\n [type]\n [description]\n \"\"\"\n if geological_feature is None:\n logger.error(\"Cannot add isosurface GeologicalFeature does not exist\")\n # update the feature to make sure its current\n \n\n\n # do isosurfacing of support using marching tetras/cubes\n x = np.linspace(self.bounding_box[0, 0], self.bounding_box[1, 0], self.nsteps[0])\n y = np.linspace(self.bounding_box[0, 1], self.bounding_box[1, 1], self.nsteps[1])\n z = np.linspace(self.bounding_box[1, 2], self.bounding_box[0, 2], self.nsteps[2])\n xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')\n points = np.array([xx.flatten(), yy.flatten(), zz.flatten()]).T\n val = geological_feature.evaluate_value(points)\n # get the stats to check what we are plotting\n mean_property_val = np.nanmean(val)#geological_feature.mean()\n min_property_val = np.nanmin(val)#geological_feature.min()\n max_property_val = np.nanmax(val)#geological_feature.max()\n # set default parameters\n slices_ = [mean_property_val]\n painter = None\n voxet = None\n tris = None\n nodes = None\n # parse kwargs for parameters\n if isovalue is not None:\n slices_ = [isovalue]\n if value is not None:\n slices_ = [value]\n if slices is not None:\n slices_ = slices\n if nslices is not None:\n var = max_property_val - min_property_val\n # buffer slices by 5%\n slices_ = np.linspace(min_property_val + var * 0.05,\n max_property_val - var * 0.05,\n nslices)\n\n if paint_with is not None:\n painter = paint_with\n\n region = kwargs.get('region', None)\n\n\n if region is not None:\n val[~region(np.array([xx.flatten(), yy.flatten(), zz.flatten()]).T)] = np.nan\n step_vector = np.array([x[1] - x[0], y[1] - y[0], z[1] - z[0]])\n for i, isovalue in enumerate(slices_):\n logger.info(\"Creating isosurface of %s at %f\" % (geological_feature.name, isovalue))\n\n if isovalue > np.nanmax(val) or isovalue < np.nanmin(val):\n logger.warning(\"Isovalue doesn't exist inside bounding box\")\n continue # return np.zeros((3, 1)).astype(int), np.zeros((3, 1))\n try:\n verts, faces, normals, values = marching_cubes(\n val.reshape(self.nsteps, order='C'),\n isovalue,\n spacing=step_vector)\n verts += np.array([self.bounding_box[0, 0], self.bounding_box[0, 1], self.bounding_box[1, 2]])\n self.model.rescale(verts)\n\n except (ValueError, RuntimeError) as e:\n print(e)\n logger.warning(\"Cannot isosurface {} at {}, skipping\".format(geological_feature.name,isovalue))\n continue\n\n \n name = geological_feature.name\n name = kwargs.get('name', name)\n name += '_iso_%f' % isovalue\n if names is not None and len(names) == len(slices_):\n name = names[i]\n if name in self.lv.objects:\n ii = 0\n newname = 
name+\"_{}\".format(ii)\n while newname in self.lv.objects:\n ii+=1\n newname = name+\"_{}\".format(ii)\n name = newname\n \n if colours is not None and len(colours) == len(slices_):\n colour=colours[i]\n if function is not None:\n function(verts,faces,name)\n if filename is not None:\n svalues = None\n # svalues[:] = np.nan\n try:\n import meshio\n meshio.write_points_cells(filename.format(name),\n verts,\n [(\"triangle\", faces)]\n )\n except ImportError:\n logger.error(\"Could not save surfaces, meshio is not installed\")\n\n surf = self.lv.triangles(name)\n surf.vertices(verts)\n surf.indices(faces)\n if painter is None:\n surf.colours(colour)\n if opacity is not None:\n # if opacity not isinstance(x, (int, float, complex)):\n # logger.warning(\"Opacity must be numeric\")\n # else:\n surf[\"opacity\"] = opacity\n if painter is not None:\n # add a property to the surface nodes for visualisation\n # calculate the mode value, just to get the most common value\n surfaceval = np.zeros(verts.shape[0])\n surfaceval[:] = painter.evaluate_value(self.model.scale(verts))\n if painter.name is geological_feature.name:\n logger.info(\"Setting surface value to %f\"%isovalue)\n surfaceval[:] = isovalue\n surf.values(surfaceval, painter.name)\n surf[\"colourby\"] = painter.name \n vmin = kwargs.get('vmin', min_property_val)\n vmax = kwargs.get('vmax', max_property_val)\n surf.colourmap(cmap, range=(vmin, vmax)) # nodes.shape[0]))\n \n def add_scalar_field(self, \n geological_feature, \n name=None, \n cmap='rainbow', \n vmin=None, \n vmax = None, \n opacity=None, \n **kwargs):\n \"\"\"Add a block the size of the model area painted with the scalar field value\n\n Parameters\n ----------\n geological_feature : GeologicalFeature\n the geological feature to colour the scalar field by\n name : string, optional\n Name of the object for lavavu, needs to be unique for the viewer object, by default uses feature name\n cmap : str, optional\n mpl colourmap reference, by default 'rainbow'\n vmin : double, optional\n minimum value of the colourmap, by default None\n vmax : double, optional\n maximum value of the colourmap, by default None\n opacity : double, optional\n change the opacity of the block\n \"\"\"\n if name == None:\n if geological_feature is None:\n name = 'unnamed scalar field'\n else:\n name = geological_feature.name + '_scalar_field'\n\n points, tri = create_box(self.bounding_box,self.nsteps)\n\n surf = self.lv.triangles(name)\n surf.vertices(self.model.rescale(points))\n surf.indices(tri)\n val =geological_feature.evaluate_value(self.model.scale(points))\n surf.values(val, geological_feature.name)\n surf[\"colourby\"] = geological_feature.name\n logger.info(\"Adding scalar field of %s to viewer. 
Min: %f, max: %f\" % (geological_feature.name,\n geological_feature.min(),\n geological_feature.max()))\n if vmin == None:\n vmin =np.nanmin(val)\n if vmax == None:\n vmax = np.nanmax(val)\n surf.colourmap(cmap, range=(vmin, vmax))\n\n def add_box(self,bounding_box,name,colour='red'):\n points, tri = create_box(bounding_box,self.nsteps)\n\n surf = self.lv.triangles(name)\n surf.vertices(self.model.rescale(points))\n surf.indices(tri)\n surf.colours(colour)\n\n def add_model(self, cmap = None, **kwargs):\n \"\"\"Add a block model painted by stratigraphic id to the viewer\n\n Calls self.model.evaluate_model() for a cube surrounding the model.\n\n Parameters\n ----------\n cmap : matplotlib cmap, optional\n colourmap name or object from mpl\n\n Notes\n ------\n It is sensible to increase the viewer step sizes before running this function to\n increase the resolution of the model as its not possible to interpolate a discrete\n colourmap and this causes the model to look like a lego block.\n You can update the model resolution by changing the attribute nsteps\n >>> viewer.nsteps = np.array([100,100,100])\n\n \"\"\"\n name = kwargs.get('name', 'geological_model')\n points, tri = create_box(self.bounding_box, self.nsteps)\n\n surf = self.lv.triangles(name)\n surf.vertices(self.model.rescale(points))\n surf.indices(tri)\n val = self.model.evaluate_model(points,scale=True)\n surf.values(val, 'model')\n surf[\"colourby\"] = 'model'\n \n if cmap is None:\n try:\n import matplotlib.colors as colors\n except ImportError:\n logger.warning(\"Cannot use predefined colours as I can't import matplotlib\")\n cmap = 'tab20'\n colours = []\n boundaries = []\n data = []\n for g in self.model.stratigraphic_column.keys():\n if g == 'faults':\n continue\n for u, v in self.model.stratigraphic_column[g].items():\n data.append((v['id'],v['colour']))\n colours.append(v['colour'])\n boundaries.append(v['id'])#print(u,v)\n cmap = colors.ListedColormap(colours).colors\n # else:\n # cmap = cm.get_cmap(cmap,n_units)\n\n \n # logger.info(\"Adding scalar field of %s to viewer. 
Min: %f, max: %f\" % (geological_feature.name,\n # geological_feature.min(),\n # geological_feature.max()))\n vmin = kwargs.get('vmin', np.nanmin(val))\n vmax = kwargs.get('vmax', np.nanmax(val))\n surf.colourmap(cmap, range=(vmin, vmax))\n\n def add_fault_displacements(self, cmap = 'rainbow', **kwargs):\n \"\"\"Add a block model painted by the fault displacement magnitude\n\n Calls fault.displacementfeature.evaluate_value(points) for all faults\n\n Parameters\n ----------\n cmap : matplotlib cmap, optional\n colourmap name or object from mpl\n\n Notes\n ------\n It is sensible to increase the viewer step sizes before running this function to\n increase the resolution of the model as its not possible to interpolate a discrete\n colourmap and this causes the model to look like a lego block.\n You can update the model resolution by changing the attribute nsteps\n >>> viewer.nsteps = np.array([100,100,100])\n\n \"\"\"\n \n name = kwargs.get('name', 'fault_displacements')\n points, tri = create_box(self.bounding_box, self.nsteps)\n\n surf = self.lv.triangles(name)\n surf.vertices(self.model.rescale(points))\n surf.indices(tri)\n vals = self.model.evaluate_fault_displacements(points)\n surf.values(vals, 'displacement')\n surf[\"colourby\"] = 'displacement'\n\n vmin = kwargs.get('vmin', np.nanmin(vals))\n vmax = kwargs.get('vmax', np.nanmax(vals))\n surf.colourmap(cmap, range=(vmin, vmax))\n \n def add_fault(self,fault,step=100):\n self.add_isosurface(fault,value=0,name=fault.name)\n self.add_vector_field(fault,locations=self.model.regular_grid()[::step])\n\n def unfault_grid(self,feature,grid=None):\n if grid is None:\n grid = self.model.regular_grid()\n # apply all faults associated with a feature to a regular grid\n self.add_value_data(self.model.rescale(grid,inplace=False),grid[:,2],name='Regular grid before faults',pointsize=10,)\n \n for f in feature.faults:\n grid = f.apply_to_points(grid)\n self.add_value_data(self.model.rescale(grid,inplace=False),grid[:,2],name='Regular grid after faults',pointsize=10,)\n\n def add_model_surfaces(self, \n strati=True, \n faults = True, \n cmap=None, \n fault_colour='black',\n displacement_cmap=None,\n **kwargs):\n \"\"\"Add surfaces for all of the interfaces in the model\n\n\n Parameters\n ----------\n strati : bool, optional\n whether to draw stratigraphy\n faults : bool, optional\n whether to draw faults, by default True\n cmap : string\n matplotlib cmap\n fault_colour : string\n colour string for faults\n displacement_cmap : string/None\n if string is specified uses this cmap to colour\n faults by displacement\n Notes\n ------\n Other parameters are passed to self.add_isosurface() \n\n \"\"\"\n try:\n from matplotlib import cm\n from matplotlib import colors\n except ImportError:\n logger.warning(\"Cannot add model surfaces without matplotlib \\n\")\n return\n from ..modelling.features import LambdaGeologicalFeature\n import time\n from tqdm.auto import tqdm\n start = time.time()\n n_units = 0 #count how many discrete colours\n name_suffix = kwargs.pop('name','')\n for g in self.model.stratigraphic_column.keys():\n if g in self.model.feature_name_index:\n for u in self.model.stratigraphic_column[g].keys():\n n_units+=1\n n_faults = 0\n for f in self.model.features:\n if f.type=='fault':\n n_faults+=1\n \n if cmap is None:\n \n colours = []\n boundaries = []\n data = []\n for g in self.model.stratigraphic_column.keys():\n if g == 'faults':\n # skip anything saved in faults here\n continue\n for u, v in self.model.stratigraphic_column[g].items():\n 
data.append((v['id'],v['colour']))\n colours.append(v['colour'])\n boundaries.append(v['id'])\n cmap = colors.ListedColormap(colours)\n else:\n cmap = cm.get_cmap('tab20',n_units)\n ci = 0\n cmap_colours = colors.to_rgba_array(cmap.colors)\n n_surfaces = 0\n if strati:\n n_surfaces+=n_units\n if faults:\n n_surfaces+=n_faults\n with tqdm(total=n_surfaces) as pbar:\n\n if strati:\n for g in self.model.stratigraphic_column.keys():\n if g in self.model.feature_name_index:\n feature = self.model.features[self.model.feature_name_index[g]]\n names = []\n values = []\n colours = []\n for u, vals in self.model.stratigraphic_column[g].items():\n names.append(u+name_suffix)\n values.append(vals['min'])\n colours.append(cmap_colours[ci,:])\n ci+=1\n pbar.set_description('Isosurfacing {}'.format(feature.name))\n self.add_isosurface(feature, slices=values,names=names,colours=colours,**kwargs)\n pbar.update(len(values))\n \n\n if faults:\n for f in self.model.features:\n if f.type == 'fault':\n def mask(x):\n val = f.displacementfeature.evaluate_value(x)\n val[np.isnan(val)] = 0\n maskv = np.zeros(val.shape).astype(bool)\n maskv[np.abs(val) > 0.001] = 1\n return maskv\n if f.name in self.model.stratigraphic_column['faults']:\n fault_colour = self.model.stratigraphic_column['faults'][f.name].get('colour',['red'])\n pbar.set_description('Isosurfacing {}'.format(f.name))\n if displacement_cmap is not None:\n fault_colour=[None]\n kwargs['cmap']=displacement_cmap\n kwargs['vmin'] = np.min(self.model.faults_displacement_magnitude)\n kwargs['vmax'] = np.max(self.model.faults_displacement_magnitude)\n kwargs['paint_with'] = LambdaGeologicalFeature(lambda xyz: np.zeros(xyz.shape[0])+f.displacement)\n # = feature\n region = kwargs.pop('region',None) \n self.add_isosurface(f,isovalue=0,region=mask,colour=fault_colour[0],name=f.name+name_suffix,**kwargs)\n pbar.update(1)\n print(\"Adding surfaces took {} seconds\".format(time.time()-start))\n def add_vector_field(self, geological_feature, **kwargs):\n \"\"\"\n\n Plot the gradient of a geological feature at given locations\n\n Parameters\n ----------\n geological_feature : Geological Feature to evaluate gradient\n locations : ((N,3)) array of evaluation locations\n kwargs : kwargs for lavavu vector\n\n Returns\n -------\n\n \"\"\"\n logger.info(\"Adding vector field for %s \" % (geological_feature.name))\n locations = kwargs.get('locations', None)\n if locations is None:\n x = np.linspace(self.bounding_box[0, 0], self.bounding_box[1, 0], self.nsteps[0])\n y = np.linspace(self.bounding_box[0, 1], self.bounding_box[1, 1], self.nsteps[1])\n z = np.linspace(self.bounding_box[1, 2], self.bounding_box[0, 2], self.nsteps[2])\n xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')\n locations = np.array([xx.flatten(), yy.flatten(), zz.flatten()]).T\n vector = geological_feature.evaluate_gradient(locations)\n # normalise\n mask = ~np.any(np.isnan(vector), axis=1)\n vector[mask, :] /= np.linalg.norm(vector[mask, :], axis=1)[:, None]\n vectorfield = self.lv.vectors(geological_feature.name + \"_grad\",\n **kwargs)\n vectorfield.vertices(self.model.rescale(locations[mask, :],inplace=False))\n vectorfield.vectors(vector[mask, :])\n return\n\n def add_data(self, feature, disks=True, vectors = False,**kwargs):\n \"\"\"\n\n Plot the data linked to the feature, can choose whether to plot all data types\n using value and grad kwargs\n\n Parameters\n ----------\n feature\n kwargs\n\n Returns\n -------\n\n \"\"\"\n name = feature.name\n add_grad = True\n add_value = True\n add_tang = True\n 
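        # each flag below gates one constraint family pulled from the feature builder: gradient/norm orientations, scalar values, tangents, and interface points\n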
add_interface = True\n if 'name' in kwargs:\n name = kwargs['name']\n del kwargs['name']\n if 'grad' in kwargs:\n add_grad = kwargs['grad']\n if 'value' in kwargs:\n add_value = kwargs['value']\n if 'tang' in kwargs:\n add_tang = kwargs['tang']\n if 'interface' in kwargs:\n add_interface = kwargs['interface']\n grad = feature.builder.get_gradient_constraints()\n norm = feature.builder.get_norm_constraints()\n value = feature.builder.get_value_constraints()\n tang = feature.builder.get_tangent_constraints()\n interface = feature.builder.get_interface_constraints()\n\n if grad.shape[0] > 0 and add_grad:\n if disks:\n self.add_orientation_disks(self.model.rescale(grad[:, :3],inplace=False), grad[:, 3:6], name + \"_grad_cp\",\n **kwargs)\n if vectors:\n self.add_vector_data(self.model.rescale(grad[:, :3],inplace=False), grad[:, 3:6], name + \"_grad_cp\",\n **kwargs)\n\n if norm.shape[0] > 0 and add_grad:\n if disks:\n self.add_orientation_disks(self.model.rescale(norm[:, :3],inplace=False), norm[:, 3:6], name + \"_norm_cp\",\n **kwargs)\n if vectors:\n self.add_vector_data(self.model.rescale(norm[:, :3],inplace=False), norm[:, 3:6], name + \"_norm_cp\",\n **kwargs)\n if value.shape[0] > 0 and add_value:\n kwargs['range'] = [feature.min(), feature.max()]\n self.add_value_data(self.model.rescale(value[:, :3],inplace=False), value[:, 3], name + \"_value_cp\",\n **kwargs)\n if tang.shape[0] > 0 and add_tang:\n self.add_vector_data(self.model.rescale(tang[:, :3],inplace=False), tang[:, 3:6], name + \"_tang_cp\",\n **kwargs)\n if interface.shape[0] > 0 and add_interface:\n self.add_points(self.model.rescale(interface[:,:3],inplace=False), name + \"_interface_cp\")\n\n def add_intersection_lineation(self, feature, **kwargs):\n name = feature.name\n if 'name' in kwargs:\n name = kwargs['name']\n del kwargs['name']\n intersection = feature.fold.foldframe.calculate_intersection_lineation(\n feature.builder)\n gpoints = feature.builder.interpolator.get_gradient_constraints()[:,:6]\n npoints = feature.builder.interpolator.get_norm_constraints()[:,:6]\n points = []\n if gpoints.shape[0] > 0:\n points.append(gpoints)\n if npoints.shape[0] > 0:\n points.append(npoints)\n points = np.vstack(points)\n if intersection.shape[0] > 0:\n self.add_vector_data(self.model.rescale(points[:,:3],inplace=False), intersection, name + \"_intersection\")\n \n def add_points(self, points, name, **kwargs):\n \"\"\"\n\n Plot points location in the lavavu viewer\n\n Parameters\n ----------\n points : numpy array of the points locations\n name : string name of the object for lavavu\n **kwargs : lavavu points kwargs\n\n Returns\n -------\n\n \"\"\"\n p = self.lv.points(name, **kwargs)\n p.vertices(points)\n\n def add_vector_data(self, position, vector, name, **kwargs):\n \"\"\"\n\n Plot point data with a vector component into the lavavu viewer\n\n Parameters\n ----------\n position : numpy array N,3 for xyz locations\n vector : numpy array of vector N,3\n name : string name for the object in lavavu\n kwargs to pass to lavavu\n\n Returns\n -------\n\n \"\"\"\n if 'colour' not in kwargs:\n kwargs['colour'] = 'black'\n # normalise\n if position.shape[0] > 0:\n vector /= np.linalg.norm(vector, axis=1)[:, None]\n vectorfield = self.lv.vectors(name, **kwargs)\n vectorfield.vertices(position)\n vectorfield.vectors(vector)\n return\n def add_orientation_disks(self,position,vector,name,symb_scale=1.,scaleshapes=None,shapelength=0,**kwargs):\n if 'colour' not in kwargs:\n kwargs['colour'] = 'black'\n # normalise\n if scaleshapes is None:\n 
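            # default disk size heuristic: about 1.4% of the largest model extent, scaled by symb_scale\n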
scaleshapes = np.max(self.model.maximum-self.model.origin)*0.014*symb_scale\n if position.shape[0] > 0:\n vector /= np.linalg.norm(vector, axis=1)[:, None]\n vectorfield = self.lv.shapes(name, scaleshapes=scaleshapes,shapelength=shapelength,**kwargs)\n vectorfield.vertices(position)\n vectorfield.vectors(vector)\n return\n\n def add_value_data(self, position, value, name, **kwargs):\n \"\"\"\n\n Plot points data with a value component\n\n Parameters\n ----------\n position : numpy array N,3 for xyz locations\n value : N array of values\n name : string name of the object for lavavu\n kwargs : kwargs to pass to lavavu\n\n Returns\n -------\n\n \"\"\"\n if \"pointtype\" not in kwargs:\n kwargs[\"pointtype\"] = \"sphere\"\n if \"pointsize\" not in kwargs:\n kwargs[\"pointsize\"] = 4\n # set the colour map to diverge unless user decides otherwise\n cmap = kwargs.get('cmap', \"rainbow\")\n p = self.lv.points(name, **kwargs)\n p.vertices(position)\n p.values(value, \"v\")\n p[\"colourby\"] = \"v\"\n\n if 'vmin' in kwargs and 'vmax' in kwargs:\n logger.info('vmin {} and vmax {}'.format(kwargs['vmin'],kwargs['vmax']))\n p.colourmap(cmap, range=(kwargs['vmin'],kwargs['vmax']))\n else:\n p.colourmap(cmap, range=(np.nanmin(value),np.nanmax(value)))\n\n def add_fold(self, fold, **kwargs):\n \"\"\"\n Draw the vector components of the fold at the locations\n\n Parameters\n ----------\n fold - fold object\n locations - numpy array of xyz\n\n Returns\n -------\n\n \"\"\"\n locations = kwargs.get('locations', None)\n if locations is None:\n x = np.linspace(self.bounding_box[0, 0], self.bounding_box[1, 0], self.nsteps[0])\n y = np.linspace(self.bounding_box[0, 1], self.bounding_box[1, 1], self.nsteps[1])\n z = np.linspace(self.bounding_box[1, 2], self.bounding_box[0, 2], self.nsteps[2])\n xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')\n locations = np.array([xx.flatten(), yy.flatten(), zz.flatten()]).T\n r2r, fold_axis, dgz = fold.get_deformed_orientation(locations)\n locations = self.model.rescale(locations,inplace=False)\n self.add_vector_data(locations, r2r, fold.name + '_direction', colour='red')\n self.add_vector_data(locations, fold_axis, fold.name + '_axis', colour='black')\n self.add_vector_data(locations, dgz, fold.name + '_norm', colour='green')\n\n def interactive(self, popout=False):\n \"\"\"\n Runs the lavavu viewer as either a jupyter notebook\n inline interactive viewer or as a separate window\n\n Returns\n -------\n\n \"\"\"\n if is_notebook() and popout is False:\n self.lv.control.Panel()\n self.lv.control.ObjectList()\n self.lv.control.show()\n if not is_notebook() or popout:\n self.lv.control.Panel()\n self.lv.control.ObjectList()\n self.lv.interactive()\n\n def add_support_box(self,geological_feature, paint=False, **kwargs):\n name = kwargs.get('name', geological_feature.name + '_support')\n box = np.vstack([geological_feature.interpolator.support.origin,geological_feature.interpolator.support.maximum])\n points, tri = create_box(box,self.nsteps)\n\n surf = self.lv.triangles(name)\n surf.vertices(self.model.rescale(points))\n surf.indices(tri)\n if paint:\n val =geological_feature.evaluate_value(self.model.scale(points))\n surf.values(val, geological_feature.name)\n surf[\"colourby\"] = geological_feature.name\n cmap = kwargs.get('cmap',lavavu.cubehelix(100))\n\n logger.info(\"Adding scalar field of %s to viewer. 
Min: %f, max: %f\" % (geological_feature.name,\n geological_feature.min(),\n geological_feature.max()))\n vmin = kwargs.get('vmin', np.nanmin(val))\n vmax = kwargs.get('vmax', np.nanmax(val))\n surf.colourmap(cmap, range=(vmin, vmax))\n def set_zscale(self,zscale):\n \"\"\" Set the vertical scale for lavavu\n\n just a simple wrapper for lavavu modelscale([xscale,yscale,zscale])\n\n Parameters\n ----------\n zscale : float\n vertical scale\n \"\"\"\n self.lv.modelscale([1,1,zscale])\n\n def set_viewer_rotation(self, rotation):\n \"\"\"\n Set the viewer rotation given a list of rotations x,y,z\n\n Parameters\n ----------\n rotation numpy array of 3 rotation\n\n Returns\n -------\n\n \"\"\"\n self.lv.rotate(rotation)\n\n def save(self, fname, **kwargs):\n \"\"\"\n Calls lavavu.Viewer.image to save the viewer current state as an image\n\n Parameters\n ----------\n fname - file name string including relative path\n kwargs - optional kwargs to give to lavavu e.g. transparent, resolution\n\n Returns\n -------\n\n \"\"\"\n self.lv.image(fname, **kwargs)\n\n def export_to_webgl(self,fname, **kwargs ):\n \n self.lv.webgl(fname,**kwargs)\n def display(self, fname=None, **kwargs):\n \"\"\"\n Calls the lv object display function. Shows a static image of the viewer inline.\n\n Returns\n -------\n\n \"\"\"\n if fname:\n self.lv.image(fname, **kwargs)\n \n self.lv.display()\n\n def image(self, name, **kwargs):\n \"\"\"\n Calls the lv object image function to save the display state\n\n Parameters\n ----------\n name : string\n name of the image file to save\n kwargs\n\n Returns\n -------\n\n \"\"\"\n self.lv.image(name)\n \n def image_array(self, **kwargs):\n \"\"\"Return the current viewer image image data as a numpy array\n\n Returns\n -------\n image : np.array\n image as a numpy array\n \"\"\"\n return self.lv.rawimage(**kwargs).data\n\n def rotatex(self, r):\n \"\"\"\n Rotate the viewer in the x plane\n\n Parameters\n ----------\n r : double\n degrees to rotate, can be +ve or -ve\n\n Returns\n -------\n\n \"\"\"\n self.lv.rotatex(r)\n\n def rotatey(self, r):\n \"\"\"\n Rotate the viewer in the Y plane\n\n Parameters\n ----------\n r : double\n degrees to rotate, can be +ve or -ve\n\n Returns\n -------\n\n \"\"\"\n self.lv.rotatey(r)\n\n def rotatez(self, r):\n \"\"\"\n Rotate the viewer in the z plane\n\n Parameters\n ----------\n r : double\n degrees to rotate, can be +ve or -ve\n\n Returns\n -------\n\n \"\"\"\n self.lv.rotatez(r)\n\n def rotate(self, r):\n \"\"\"\n Rotate by a vector of rotation angles\n\n Parameters\n ----------\n r : list/numpy array\n a vector of rotations\n\n Returns\n -------\n\n \"\"\"\n self.lv.rotate(r)\n\n @property\n def rotation(self):\n \"\"\"Accessor for the viewer rotation\n Returns\n -------\n list\n x,y,z rotations\n \"\"\"\n return self.lv['xyzrotate']\n \n @rotation.setter\n def rotation(self,xyz):\n \"\"\"Set the rotation of the viewer\n\n Parameters\n ----------\n xyz : list like\n x y z rotations\n \"\"\"\n self.lv.rotation(xyz)\n\n @property\n def border(self):\n \"\"\"The width of the border around the model area\n\n Returns\n -------\n border : double\n [description]\n \"\"\"\n return self.lv['border']\n \n @border.setter\n def border(self, border):\n \"\"\"Setter for the border\n\n Parameters\n ----------\n border : double\n set the thickness of the border around objects\n \"\"\"\n self.lv['border'] = border\n\n def clear(self):\n \"\"\"Remove all objects from the viewer\n \"\"\"\n self.lv.clear()\n @property\n def camera(self):\n return self.lv.camera()\n 
\n @camera.setter\n def camera(self,camera):\n self.lv.camera(camera)\n \n @property\n def xmin(self):\n return self.lv['xmin']\n \n @xmin.setter\n def xmin(self, xmin):\n self.lv['xmin'] = xmin\n\n @property\n def xmax(self):\n return self.lv['xmax']\n \n @xmax.setter\n def xmax(self, xmax):\n self.lv['xmax'] = xmax\n\n @property\n def ymin(self):\n return self.lv['ymin']\n \n @ymin.setter\n def ymin(self, ymin):\n self.lv['ymin'] = ymin\n\n @property\n def ymax(self):\n return self.lv['ymax']\n \n @ymax.setter\n def ymax(self, ymax):\n self.lv['ymax'] = ymax\n \n @property\n def zmin(self):\n return self.lv['zmax']\n\n @zmin.setter\n def zmin(self, zmin):\n self.lv['zmin'] = zmin\n\n @property\n def zmax(self):\n return self.lv['zmax']\n \n @zmax.setter\n def zmax(self, zmax):\n self.lv['zmax'] = zmax\n ","repo_name":"wgorczyk/LoopStructural","sub_path":"LoopStructural/visualisation/model_visualisation.py","file_name":"model_visualisation.py","file_ext":"py","file_size_in_byte":42419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"30530826118","text":"\nimport re\n\ndef camel_case(text: str) -> str:\n\n words = text.split('_')\n camel_name = words[0]\n camel_name += ''.join(list(map(str.capitalize, words[1:])))\n\n return camel_name\n\ndef snake_case(text: str) -> str:\n\n words = re.sub(r'([A-Z])', r\" \\1\", text).split()\n snakeName = '_'.join(list(map(str.lower, words)))\n\n return snakeName\n\nprint(camel_case(input(\"Input your text in camel_case.Transformed in snakeСase: \")))\nprint(snake_case(input(\"Input your text in SnakeCase. Transformed in camel_case: \")))","repo_name":"vadympopovych24/Learning_code","sub_path":"Lab7_7.py","file_name":"Lab7_7.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35084667931","text":"# -*- coding: utf-8 -*-\n\nimport sympy as sp\nimport mapiranjeQ9 as mp\nimport numpy as np\nimport pandas as pd\nimport os\n\n\n\nclass MatricaKrutostiQ9:\n\n def __init__(self):\n self.x, self.y = sp.symbols('x y') # x i y su u stvari ksi i eta, vezane za izoparametarski koordinatni sistem!\n self.E, self.ni, self.t = sp.symbols('E ni t')\n self.a, self.b = sp.symbols('a b')\n #Ravno stanje deformacije:\n #self.Ematrica = sp.Matrix([[self.E*(1-self.ni)/((1+self.ni)*(1-2*self.ni)), self.E*(1-self.ni)*self.ni/((1-self.ni**2)*(1-2*self.ni)), 0],\n # [self.E*(1-self.ni)*self.ni/((1-self.ni**2)*(1-2*self.ni)), self.E*(1-self.ni)/((1+self.ni)*(1-2*self.ni)), 0],\n # [0, 0, self.E/(2*(1 + self.ni))]\n # ])\n\n #Ravno stanje napona:\n self.Ematrica = sp.Matrix([[self.E/(1-self.ni**2), self.ni*self.E/(1-self.ni**2), 0],\n [self.ni*self.E/(1-self.ni**2), self.E/(1-self.ni**2), 0],\n [0, 0, (1-self.ni)/2*self.E/(1-self.ni**2)]\n ])\n\n# Interpolacione funkcije:\n def N9(self):\n N9 = (1 - self.x ** 2) * (1 - self.y ** 2)\n return N9\n\n def N8(self):\n N8 = 0.5 * (1 - self.x) * (1 - self.y ** 2) - 0.5 * self.N9()\n return N8\n\n def N7(self):\n N7 = 0.5 * (1 - self.x ** 2) * (1 + self.y) - 0.5 * self.N9()\n return N7\n\n def N6(self):\n N6 = 0.5 * (1 + self.x) * (1 - self.y ** 2) - 0.5 * self.N9()\n return N6\n\n def N5(self):\n N5 = 0.5 * (1 - self.x ** 2) * (1 - self.y) - 0.5 * self.N9()\n return N5\n\n def N4(self):\n N4 = 0.25 * (1 - self.x) * (1 + self.y) - 0.5 * self.N7() - 0.5 * self.N8() - 0.25 * self.N9()\n return N4\n\n def N3(self):\n N3 = 0.25 * (1 + self.x) * (1 + self.y) - 0.5 * self.N6() - 0.5 * 
self.N7() - 0.25 * self.N9()\n return N3\n\n def N2(self):\n N2 = 0.25 * (1 + self.x) * (1 - self.y) - 0.5 * self.N5() - 0.5 * self.N6()- 0.25 * self.N9()\n return N2\n\n def N1(self):\n N1 = 0.25 * (1 - self.x) * (1 - self.y) - 0.5 * self.N5() - 0.5 * self.N8() - 0.25 * self.N9()\n return N1\n\n# Jakobijeva matrica\n def Jakobijan(self):\n kt = mp.KoordinateTacaka() # ucitavanje klase iz skripte mapiranjeQ9.py\n niz_interpolacionih = sp.Matrix([self.N1(), self.N2(), self.N3(), self.N4(), self.N5(), self.N6(), self.N7(), self.N8(), self.N9()])\n x_niz = kt.x_niz()\n X = sp.transpose(x_niz)*niz_interpolacionih # Polje pomeranja u X pravcu\n X = X[0]\n y_niz = kt.y_niz()\n Y = sp.transpose(y_niz)*niz_interpolacionih # Polje pomeranja u Y pravcu\n Y = Y[0]\n J = sp.Matrix([[sp.diff(X, self.x), sp.diff(Y, self.x)], [sp.diff(X, self.y), sp.diff(Y, self.y)]])\n return J\n\n def JakobijanInv(self):\n J = self.Jakobijan()\n J = J.inv()\n return J\n\n def JakobijanDet(self):\n J = self.Jakobijan()\n J = J.det()\n return J\n\n\n#Kolone matrice B pri cemu su kolone matrice reda 3x2\n def B1(self):\n vektor = self.JakobijanInv()*sp.Matrix([[sp.diff(self.N1(), self.x)], [sp.diff(self.N1(), self.y)]])\n x = vektor[0]\n y = vektor[1]\n matrica = sp.Matrix([[x, 0], [0, y], [y, x]])\n return matrica\n\n def B2(self):\n vektor = self.JakobijanInv()*sp.Matrix([[sp.diff(self.N2(), self.x)], [sp.diff(self.N2(), self.y)]])\n x = vektor[0]\n y = vektor[1]\n matrica = sp.Matrix([[x, 0], [0, y], [y, x]])\n return matrica\n\n def B3(self):\n vektor = self.JakobijanInv()*sp.Matrix([[sp.diff(self.N3(), self.x)], [sp.diff(self.N3(), self.y)]])\n x = vektor[0]\n y = vektor[1]\n matrica = sp.Matrix([[x, 0], [0, y], [y, x]])\n return matrica\n\n def B4(self):\n vektor = self.JakobijanInv()*sp.Matrix([[sp.diff(self.N4(), self.x)], [sp.diff(self.N4(), self.y)]])\n x = vektor[0]\n y = vektor[1]\n matrica = sp.Matrix([[x, 0], [0, y], [y, x]])\n return matrica\n\n def B5(self):\n vektor = self.JakobijanInv()*sp.Matrix([[sp.diff(self.N5(), self.x)], [sp.diff(self.N5(), self.y)]])\n x = vektor[0]\n y = vektor[1]\n matrica = sp.Matrix([[x, 0], [0, y], [y, x]])\n return matrica\n\n def B6(self):\n vektor = self.JakobijanInv()*sp.Matrix([[sp.diff(self.N6(), self.x)], [sp.diff(self.N6(), self.y)]])\n x = vektor[0]\n y = vektor[1]\n matrica = sp.Matrix([[x,0], [0, y], [y, x]])\n return matrica\n\n def B7(self):\n vektor = self.JakobijanInv()*sp.Matrix([[sp.diff(self.N7(), self.x)],[sp.diff(self.N7(), self.y)]])\n x = vektor[0]\n y = vektor[1]\n matrica = sp.Matrix([[x, 0], [0, y], [y, x]])\n return matrica\n\n def B8(self):\n vektor = self.JakobijanInv()*sp.Matrix([[sp.diff(self.N8(), self.x)],[sp.diff(self.N8(), self.y)]])\n x = vektor[0]\n y = vektor[1]\n matrica = sp.Matrix([[x, 0], [0, y], [y, x]])\n return matrica\n\n def B9(self):\n vektor = self.JakobijanInv()*sp.Matrix([[sp.diff(self.N9(), self.x)], [sp.diff(self.N9(), self.y)]])\n x = vektor[0]\n y = vektor[1]\n matrica = sp.Matrix([[x, 0], [0, y], [y, x]])\n return matrica\n\n# Formiranje matrice B spajanjem matrica Bi:\n def Bmatrica(self):\n b = self.B1().col_insert(2, self.B2())\n b = b.col_insert(4, self.B3())\n b = b.col_insert(6, self.B4())\n b = b.col_insert(8, self.B5())\n b = b.col_insert(10, self.B6())\n b = b.col_insert(12, self.B7())\n b = b.col_insert(14, self.B8())\n b = b.col_insert(16, self.B9())\n return b\n\n# Transponovana B matrica:\n def BT(self):\n BT = sp.transpose(self.Bmatrica())\n return BT\n\n def Provera(self):\n fajlovi = os.listdir()\n if 
'ElementiMatriceKrutosti.csv' in fajlovi:\n os.remove('ElementiMatriceKrutosti.csv')\n os.system('echo \"Elementi\" > ElementiMatriceKrutosti.csv')\n else:\n os.system('echo \"Elementi\" > ElementiMatriceKrutosti.csv')\n\n\n# Formiranje podintegralne f-je i integracija:\n def integracija(self):\n self.Provera()\n BT = self.BT()\n B = self.Bmatrica()\n K = np.array([], dtype=str)\n for i in np.arange(1,19):\n red= np.array([], dtype=str)\n for j in np.arange(1,19):\n podintegralna = BT.row(i - 1)*self.Ematrica*B.col(j - 1)*self.t*self.JakobijanDet()\n integral = sp.integrate(sp.integrate(podintegralna, (self.x, -1, 1)), (self.y, -1, 1))\n koeficijent_matrice = '=' + str(integral[0])\n koeficijent_matrice = koeficijent_matrice.replace('**','^')\n red = np.append(red, koeficijent_matrice)\n K = np.asarray(red)\n df = pd.DataFrame(K)\n df.to_csv('ElementiMatriceKrutosti.csv', encoding='UTF-8', mode='a', sep=';', header=False, index=False)\n\nobj = MatricaKrutostiQ9()\nobj.integracija()\n","repo_name":"nikolalakic/VKMKE","sub_path":"ElementiMatriceKrutosti.py","file_name":"ElementiMatriceKrutosti.py","file_ext":"py","file_size_in_byte":7073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"72353926232","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\ndef get_qualified_name(function):\n # Python 3\n if hasattr(function, '__qualname__'):\n return function.__qualname__\n\n # Python 2\n if hasattr(function, 'im_class'):\n return function.im_class.__name__ + '.' + function.__name__\n return function.__name__\n\n\ndef add_notice_to_docstring(\n doc, instructions, no_doc_str, suffix_str, notice):\n \"\"\"Adds a deprecation notice to a docstring.\"\"\"\n if not doc:\n lines = [no_doc_str]\n else:\n lines = doc.splitlines()\n lines[0] += ' ' + suffix_str\n\n notice = [''] + notice + [instructions]\n\n if len(lines) > 1:\n # Make sure that we keep our distance from the main body\n if lines[1].strip():\n notice.append('')\n\n lines[1:1] = notice\n else:\n lines += notice\n\n return '\\n'.join(lines)\n\n\ndef validate_callable(func, decorator_name):\n if not hasattr(func, '__call__'):\n raise ValueError(\n '%s is not a function. 
If this is a property, make sure'\n ' @property appears before @%s in your source code:'\n '\\n\\n@property\\n@%s\\ndef method(...)' % (\n func, decorator_name, decorator_name))\n\n\nclass classproperty(object): # pylint: disable=invalid-name\n \"\"\"Class property decorator.\n\n Example usage:\n\n class MyClass(object):\n\n @classproperty\n def value(cls):\n return '123'\n\n > print MyClass.value\n 123\n \"\"\"\n\n def __init__(self, func):\n self._func = func\n\n def __get__(self, owner_self, owner_cls):\n return self._func(owner_cls)\n","repo_name":"tobegit3hub/deep_image_model","sub_path":"java_predict_client/src/main/proto/tensorflow/python/util/decorator_utils.py","file_name":"decorator_utils.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":100,"dataset":"github-code","pt":"5"} +{"seq_id":"15617444404","text":"#!/usr/bin/env python3\nfrom flystim.stim_server import launch_stim_server\nfrom flystim.screen import Screen, SubScreen\nfrom flystim.trajectory import Trajectory\nimport numpy as np\nfrom flystim.draw import draw_screens\n\nfrom time import sleep\n\n\ndef main():\n screen = Screen(fullscreen=False, server_number=0, id=0, vsync=False)\n\n # draw_screens(screen)\n\n manager = launch_stim_server(screen)\n\n # contrast-reversing grating\n # tf = 1 # Hz\n # t = np.linspace(0, 6, 100)\n # c = np.sin(2*np.pi*tf*t)\n # tv_pairs = list(zip(t, c))\n # contrast_traj = Trajectory(tv_pairs, kind='linear').to_dict()\n\n contrast_trajectory = {'name': 'Sinusoid',\n 'temporal_frequency': 1,\n 'amplitude': 1,\n 'offset': 0}\n\n\n manager.load_stim(name='CylindricalGrating', period=10, mean=0.5, contrast=contrast_trajectory, offset=0.0, profile='square',\n color=[1, 1, 1, 1], cylinder_radius=1, cylinder_height=10, theta=0, phi=0, angle=0)\n\n sleep(1)\n\n manager.start_stim()\n sleep(4)\n\n manager.stop_stim(print_profile=True)\n sleep(1)\n\nif __name__ == '__main__':\n main()\n","repo_name":"ClandininLab/flystim","sub_path":"examples/contrast_reversing_grating.py","file_name":"contrast_reversing_grating.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"19016063224","text":"'''\n\tThis is memoization approach of knapsack problem. First initialize a 2-d matrix of size W+1 and size of val array+1 with all the elemens as 0.\n\tNow we have option either to pick the element or not to pick.If the weght is less than the weight of the bag then only we can pick it.\n\tIf we pick the element we subtract its weight from the weight of the back and we add its value. We can only pick the item only if its\n\tweight is smaller than the weight of the bag \n\n\tOur answer will be the bottom right value in the matrix. 
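\n\n\tWritten as a recurrence, the memoised table satisfies:\n\tt[n][W] = max(val[n-1] + t[n-1][W - wt[n-1]], t[n-1][W]) if wt[n-1] <= W, and t[n][W] = t[n-1][W] otherwise.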
\n'''\n\n\nval = [11, 14, 10, 45, 30]\nwt = [2, 5, 1, 3, 4]\nW = 7\nn = 5\n\nt = [[0 for i in range(W+1)] for j in range(n+1)]\n\ndef knapsack(wt, val, W, n):\n\tif n == 0 or W == 0:\n\t\treturn 0\n\n\tif t[n][W] != 0:\n\t\treturn t[n][W]\n\n\tif wt[n-1] <= W :\n\t\tt[n][W] = max(val[n-1] + knapsack(wt, val, W-wt[n-1], n-1), knapsack(wt, val, W, n-1))\n\t\treturn t[n][W]\n\n\telif wt[n-1] > W:\n\t\tt[n][W] = knapsack(wt, val, W, n-1)\n\t\treturn t[n][W]\n\nprint(knapsack(wt, val, W, n))","repo_name":"Ashish-012/Competitive-Coding","sub_path":"dp/knapsackRecursive.py","file_name":"knapsackRecursive.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"17182642429","text":"from flask import (Blueprint, redirect, render_template, request, send_file,\n url_for)\nfrom flask_login import login_required\nfrom flask_pymongo import pymongo\n\nfrom web_messaging.blueprints.billing.models import Bill\nfrom web_messaging.blueprints.billing.storage import retrieve_file_from_bucket\nfrom web_messaging.extensions import mongo\n\nbilling = Blueprint('billing', __name__, template_folder='templates')\n\n\n@billing.route(\"/billing/\", methods=['GET'])\n@login_required\ndef get_bill(filename):\n \"\"\" Return a bill as an attachment \"\"\"\n path = retrieve_file_from_bucket(filename)\n return send_file(path, as_attachment=True)\n\n\n@billing.route(\"/billing\", methods=['GET'])\n@login_required\ndef bills():\n \"\"\" Billing overview page \"\"\"\n collection = mongo.db['billing']\n cursor = collection.find()\n bills = cursor.sort(\"date\", pymongo.ASCENDING)\n return render_template(\"billing.html\", bills=bills)\n\n\ndef create_new_bill(file, billing_date, total_cost_usd):\n \"\"\" Create a new Bill object and upload that object on MongoDB\"\"\"\n new_bill = Bill(billing_date, int(total_cost_usd), file)\n new_bill.upload_to_gcs()\n mongo.db['billing'].insert_one(new_bill.dict())\n\n\n@billing.route(\"/upload-bill\", methods=['POST', 'GET'])\n@login_required\ndef upload_bills():\n \"\"\" Upload a new bill to the GCS \"\"\"\n if request.method == 'POST':\n if 'file' not in request.files:\n return 'No file part'\n file = request.files['file']\n billing_date = request.form['billing-date']\n total_cost_usd = request.form['total-cost-usd']\n create_new_bill(file, billing_date, total_cost_usd)\n return redirect(url_for('billing.bills'))\n","repo_name":"Pierre-Alexandre35/messaging-service-mousset","sub_path":"web_messaging/blueprints/billing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38849949516","text":"\"\"\"create table\n\nRevision ID: a7126e21f479\nRevises: \nCreate Date: 2020-06-21 20:46:07.015441\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a7126e21f479'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('gejala',\n sa.Column('id', sa.BigInteger(), autoincrement=True, nullable=False),\n sa.Column('gejala', sa.String(length=200), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_gejala_created_at'), 'gejala', ['created_at'], unique=False)\n op.create_index(op.f('ix_gejala_updated_at'), 'gejala', ['updated_at'], unique=False)\n op.create_table('penyakit',\n sa.Column('id', sa.BigInteger(), autoincrement=True, nullable=False),\n sa.Column('penyakit', sa.String(length=140), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_penyakit_created_at'), 'penyakit', ['created_at'], unique=False)\n op.create_index(op.f('ix_penyakit_updated_at'), 'penyakit', ['updated_at'], unique=False)\n op.create_table('penyakitgejala',\n sa.Column('penyakit_id', sa.BigInteger(), nullable=True),\n sa.Column('gejala_id', sa.BigInteger(), nullable=True),\n sa.ForeignKeyConstraint(['gejala_id'], ['gejala.id'], ),\n sa.ForeignKeyConstraint(['penyakit_id'], ['penyakit.id'], )\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('penyakitgejala')\n op.drop_index(op.f('ix_penyakit_updated_at'), table_name='penyakit')\n op.drop_index(op.f('ix_penyakit_created_at'), table_name='penyakit')\n op.drop_table('penyakit')\n op.drop_index(op.f('ix_gejala_updated_at'), table_name='gejala')\n op.drop_index(op.f('ix_gejala_created_at'), table_name='gejala')\n op.drop_table('gejala')\n # ### end Alembic commands ###\n","repo_name":"mbayuajis/depresi","sub_path":"migrations/versions/a7126e21f479_create_table.py","file_name":"a7126e21f479_create_table.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"14003126983","text":"import numpy as np\nimport cv2\nfrom PIL import ImageGrab #Use ImageGrab in Pillow for Windows or MacOS environment.\nimport win32api\nimport win32gui\nimport win32con\nimport matplotlib.pylab as plt\n#import pyscreenshot as ImageGrab #Use pyscreenshot in Linux.\nfrom win32api import GetSystemMetrics\nimport time\n\n#Input libraries\nimport ctypes_functions\n\n#ML libraries\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\n##########COMPUTER VISION ROUTINES\nWIDTH = GetSystemMetrics(0)\nHEIGHT = GetSystemMetrics(1)\nW_ACTUAL = 1920\nH_ACTUAL = 1080\n\ndef screenCapture(windowName):\n \n cv2.imshow(windowName)\n\ndef _windowEnumerationHandler(hwnd, resultList):\n '''Pass to win32gui.EnumWindows() to generate list of window handle,\n window text, window class tuples.'''\n resultList.append((hwnd,\n win32gui.GetWindowText(hwnd),\n win32gui.GetClassName(hwnd)))\ndef findWindowHandle(WindowTitle):\n output = []\n topWindows = []\n win32gui.EnumWindows(_windowEnumerationHandler, topWindows)\n for i in topWindows:\n if WindowTitle in i[1]: output.append(i)\n else: None\n return output\n\ndef windowCoordinates(windowHandle):\n #win32gui.SetWindowPos(windowHandle, win32con.HWND_TOPMOST, 0,0,0,0,\n #win32con.SWP_NOMOVE | win32con.SWP_NOSIZE)\n x, y, x1, y1 = win32gui.GetClientRect(windowHandle)\n return np.array([win32gui.ClientToScreen(windowHandle, (x,y)), win32gui.ClientToScreen(windowHandle, 
(x1, y1))]).flatten() #win32gui.GetWindowRect(windowHandle) #win32gui.GetWindowPlacement(windowHandle)\n\ndef screenGrab(coord):\n return np.array(ImageGrab.grab(tuple(coord)))\n\ndef preprocess(image):\n output = np.average(np.array(image), axis=2)[:,123:640]\n return cv2.resize(output, dsize=(100, 100), interpolation=cv2.INTER_CUBIC)\n\n##########INPUT CONTROL\nVK_CODE = {'backspace':0x08,\n 'tab':0x09,\n 'clear':0x0C,\n 'enter':0x0D,\n 'shift':0x10,\n 'ctrl':0x11,\n 'alt':0x12,\n 'pause':0x13,\n 'caps_lock':0x14,\n 'esc':0x1B,\n 'spacebar':0x20,\n 'page_up':0x21,\n 'page_down':0x22,\n 'end':0x23,\n 'home':0x24,\n 'left_arrow':0x25,\n 'up_arrow':0x26,\n 'right_arrow':0x27,\n 'down_arrow':0x28,\n 'select':0x29,\n 'print':0x2A,\n 'execute':0x2B,\n 'print_screen':0x2C,\n 'ins':0x2D,\n 'del':0x2E,\n 'help':0x2F,\n '0':0x30,\n '1':0x31,\n '2':0x32,\n '3':0x33,\n '4':0x34,\n '5':0x35,\n '6':0x36,\n '7':0x37,\n '8':0x38,\n '9':0x39,\n 'a':0x41,\n 'b':0x42,\n 'c':0x43,\n 'd':0x44,\n 'e':0x45,\n 'f':0x46,\n 'g':0x47,\n 'h':0x48,\n 'i':0x49,\n 'j':0x4A,\n 'k':0x4B,\n 'l':0x4C,\n 'm':0x4D,\n 'n':0x4E,\n 'o':0x4F,\n 'p':0x50,\n 'q':0x51,\n 'r':0x52,\n 's':0x53,\n 't':0x54,\n 'u':0x55,\n 'v':0x56,\n 'w':0x57,\n 'x':0x58,\n 'y':0x59,\n 'z':0x5A,\n 'numpad_0':0x60,\n 'numpad_1':0x61,\n 'numpad_2':0x62,\n 'numpad_3':0x63,\n 'numpad_4':0x64,\n 'numpad_5':0x65,\n 'numpad_6':0x66,\n 'numpad_7':0x67,\n 'numpad_8':0x68,\n 'numpad_9':0x69,\n 'multiply_key':0x6A,\n 'add_key':0x6B,\n 'separator_key':0x6C,\n 'subtract_key':0x6D,\n 'decimal_key':0x6E,\n 'divide_key':0x6F,\n 'F1':0x70,\n 'F2':0x71,\n 'F3':0x72,\n 'F4':0x73,\n 'F5':0x74,\n 'F6':0x75,\n 'F7':0x76,\n 'F8':0x77,\n 'F9':0x78,\n 'F10':0x79,\n 'F11':0x7A,\n 'F12':0x7B,\n 'F13':0x7C,\n 'F14':0x7D,\n 'F15':0x7E,\n 'F16':0x7F,\n 'F17':0x80,\n 'F18':0x81,\n 'F19':0x82,\n 'F20':0x83,\n 'F21':0x84,\n 'F22':0x85,\n 'F23':0x86,\n 'F24':0x87,\n 'num_lock':0x90,\n 'scroll_lock':0x91,\n 'left_shift':0xA0,\n 'right_shift ':0xA1,\n 'left_control':0xA2,\n 'right_control':0xA3,\n 'left_menu':0xA4,\n 'right_menu':0xA5,\n 'browser_back':0xA6,\n 'browser_forward':0xA7,\n 'browser_refresh':0xA8,\n 'browser_stop':0xA9,\n 'browser_search':0xAA,\n 'browser_favorites':0xAB,\n 'browser_start_and_home':0xAC,\n 'volume_mute':0xAD,\n 'volume_Down':0xAE,\n 'volume_up':0xAF,\n 'next_track':0xB0,\n 'previous_track':0xB1,\n 'stop_media':0xB2,\n 'play/pause_media':0xB3,\n 'start_mail':0xB4,\n 'select_media':0xB5,\n 'start_application_1':0xB6,\n 'start_application_2':0xB7,\n 'attn_key':0xF6,\n 'crsel_key':0xF7,\n 'exsel_key':0xF8,\n 'play_key':0xFA,\n 'zoom_key':0xFB,\n 'clear_key':0xFE,\n '+':0xBB,\n ',':0xBC,\n '-':0xBD,\n '.':0xBE,\n '/':0xBF,\n '`':0xC0,\n ';':0xBA,\n '[':0xDB,\n '\\\\':0xDC,\n ']':0xDD,\n \"'\":0xDE,\n '`':0xC0}\n\ndef press(*args):\n '''\n one press, one release.\n accepts as many arguments as you want. e.g. press('left_arrow', 'a','b').\n '''\n for i in args:\n win32api.keybd_event(VK_CODE[i], 0,0,0)\n time.sleep(.05)\n win32api.keybd_event(VK_CODE[i],0 ,win32con.KEYEVENTF_KEYUP ,0)\n\ndef pressAndHold(*args):\n '''\n press and hold. Do NOT release.\n accepts as many arguments as you want.\n e.g. pressAndHold('left_arrow', 'a','b').\n '''\n for i in args:\n win32api.keybd_event(VK_CODE[i], 0,0,0)\n time.sleep(.05)\n \ndef pressHoldRelease(*args):\n '''\n press and hold passed in strings. Once held, release\n accepts as many arguments as you want.\n e.g. pressAndHold('left_arrow', 'a','b').\n\n this is useful for issuing shortcut command or shift commands.\n e.g. 
pressHoldRelease('ctrl', 'alt', 'del'), pressHoldRelease('shift','a')\n '''\n for i in args:\n win32api.keybd_event(VK_CODE[i], 0,0,0)\n time.sleep(.05)\n \n for i in args:\n win32api.keybd_event(VK_CODE[i],0 ,win32con.KEYEVENTF_KEYUP ,0)\n time.sleep(.1)\n \n \n\ndef release(*args):\n '''\n release depressed keys\n accepts as many arguments as you want.\n e.g. release('left_arrow', 'a','b').\n '''\n for i in args:\n win32api.keybd_event(VK_CODE[i],0 ,win32con.KEYEVENTF_KEYUP ,0)\n \n #NES controller configuration\n\ndef action(key_index):\n keys = [ctypes_functions.KEY_A,\n ctypes_functions.KEY_D,\n ctypes_functions.KEY_V,\n ctypes_functions.KEY_B]\n #win32api.keybd_event(ctypes_functions.VK_OEM_5, 0, 0, 0)\n for key in key_index:\n win32api.keybd_event(keys[key],0 ,0 ,0)\n time.sleep(0.01)\n #win32api.keybd_event(ctypes_functions.VK_OEM_5, 0, win32con.KEYEVENTF_KEYUP, 0)\n for key in key_index:\n win32api.keybd_event(keys[key],0 ,win32con.KEYEVENTF_KEYUP ,0)\n\ndef trade_off(action):\n keys = [[ctypes_functions.KEY_A, 0],\n [ctypes_functions.KEY_D, 0],\n [ctypes_functions.KEY_V, 0],\n [ctypes_functions.KEY_B, 0],\n [ctypes_functions.KEY_A, win32con.KEYEVENTF_KEYUP],\n [ctypes_functions.KEY_D, win32con.KEYEVENTF_KEYUP],\n [ctypes_functions.KEY_V, win32con.KEYEVENTF_KEYUP],\n [ctypes_functions.KEY_B, win32con.KEYEVENTF_KEYUP]]\n win32api.keybd_event(keys[action][0],0 ,keys[action][1] ,0)\n\ndef reset_input():\n keys = [[ctypes_functions.KEY_A, win32con.KEYEVENTF_KEYUP],\n [ctypes_functions.KEY_D, win32con.KEYEVENTF_KEYUP],\n [ctypes_functions.KEY_V, win32con.KEYEVENTF_KEYUP],\n [ctypes_functions.KEY_B, win32con.KEYEVENTF_KEYUP]]\n for i in keys:\n win32api.keybd_event(i[0],0 ,i[1] ,0) \n \ndef load_save_state():\n win32api.keybd_event(ctypes_functions.KEY_P,0 ,0 ,0)\n time.sleep(0.01)\n win32api.keybd_event(ctypes_functions.KEY_P,0 ,win32con.KEYEVENTF_KEYUP ,0)\n \ndef load_random_state():\n states = [ctypes_functions.KEY_1,\n ctypes_functions.KEY_2,\n ctypes_functions.KEY_3,\n ctypes_functions.KEY_4,\n ctypes_functions.KEY_5,\n ctypes_functions.KEY_6,\n ctypes_functions.KEY_7,\n ctypes_functions.KEY_8,\n ctypes_functions.KEY_9]\n chosen_state = np.random.choice(states)\n win32api.keybd_event(chosen_state,0 ,0 ,0)\n win32api.keybd_event(ctypes_functions.KEY_P,0 ,0 ,0)\n win32api.keybd_event(chosen_state,0 ,win32con.KEYEVENTF_KEYUP ,0)\n win32api.keybd_event(ctypes_functions.KEY_P,0 ,win32con.KEYEVENTF_KEYUP ,0)\n \n#def action(key_index): #Backup of full controller.\n#keys = [ctypes_functions.KEY_W,\n# ctypes_functions.KEY_S,\n# ctypes_functions.KEY_A,\n# ctypes_functions.KEY_D,\n# ctypes_functions.KEY_V,\n# ctypes_functions.KEY_B]\n#win32api.keybd_event(ctypes_functions.VK_OEM_5, 0, 0, 0)\n#win32api.keybd_event(keys[key_index],0 ,0 ,0)\n#time.sleep(0.05)\n#win32api.keybd_event(ctypes_functions.VK_OEM_5, 0, win32con.KEYEVENTF_KEYUP, 0)\n#win32api.keybd_event(keys[key_index],0 ,win32con.KEYEVENTF_KEYUP ,0)\n \n##########MEMORY ACCESS ROUTINES\n\n\n\n##########AI ROUTINES\n\nclass ANN():\n def __init__(self):\n num_actions = 8\n #NN layers\n image_input = layers.Input(shape=(4,100,100))\n vector_input = layers.Input(shape=(4,))\n preprocessor = layers.experimental.preprocessing.Resizing(100, 100, interpolation='bilinear', name=None)(image_input)\n # Convolutions on the frames on the screen\n layer1 = layers.Conv2D(32, 8, strides=4, activation=\"relu\")(preprocessor)\n layer2 = layers.Conv2D(64, 4, strides=2, activation=\"relu\")(layer1)\n layer3 = layers.Conv2D(64, 3, strides=1, 
activation=\"relu\")(layer2)\n layer4 = layers.Flatten()(layer3)\n concat_layers = layers.Concatenate()([vector_input, layer4])\n layer5 = layers.Dense(512, activation=\"relu\")(concat_layers)\n action = layers.Dense(num_actions, activation=\"linear\")(layer5)\n\n #Define NN parameters.\n self.toymodel = keras.Model(inputs=[image_input, vector_input], outputs=action)\n self.loss_fn = tf.keras.losses.Huber()\n self.optimizer = keras.optimizers.Adam(learning_rate=0.00025, clipnorm=1.0)\n self.toymodel.compile(self.optimizer, self.loss_fn)\n\n def trainStep(self, sample_X, sample_Y):\n with tf.GradientTape() as tape:\n old_q = self.toymodel(sample_X, training=True)\n loss_value = self.loss_fn(sample_Y, old_q)\n grads = tape.gradient(loss_value, self.toymodel.trainable_weights)\n self.optimizer.apply_gradients(zip(grads, self.toymodel.trainable_weights))\n return loss_value.numpy()\n\n def train(self, x_input, y_input, batchsize=64):\n loss_history = []\n dataset = tf.data.Dataset.from_tensor_slices((x_input, y_input))\n dataset = dataset.shuffle(buffer_size=1024).batch(batchsize)\n for steps, (x, y) in enumerate(dataset):\n loss_history.append(self.trainStep(x,y))\n return loss_history\n\n def forward(self, x_input):\n return self.toymodel(x_input)\n","repo_name":"cookie2004/GRLA","sub_path":"libs/vgbot.py","file_name":"vgbot.py","file_ext":"py","file_size_in_byte":11687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"36030665518","text":"import nose.tools\nimport pkgutil\nimport os, sys\nimport numpy\nimport functools\n\ndef test_kp66_to_mask():\n \"\"\"\n Test the mask constructions based on DRMF keypoints\n \"\"\"\n keypoints = numpy.zeros((66,2))\n\n # dummy keypoints that are considered to build the mask\n keypoints[1] = numpy.array([10, 10])\n keypoints[3] = numpy.array([20, 10])\n keypoints[5] = numpy.array([30, 10])\n keypoints[8] = numpy.array([50, 35])\n keypoints[11] = numpy.array([30, 60])\n keypoints[13] = numpy.array([20, 60])\n keypoints[15] = numpy.array([10, 60])\n keypoints[41] = numpy.array([10, 30])\n keypoints[47] = numpy.array([10, 40])\n\n # dummy image\n image = numpy.zeros((3, 100, 100), dtype='uint8')\n\n from bob.rppg.cvpr14.extract_utils import kp66_to_mask\n mask_points, mask = kp66_to_mask(image, keypoints, 10, False)\n assert numpy.array_equal(mask_points[0], numpy.array([10,15]))\n assert numpy.array_equal(mask_points[1], numpy.array([20,15]))\n assert numpy.array_equal(mask_points[2], numpy.array([30,15]))\n assert numpy.array_equal(mask_points[3], numpy.array([45,35]))\n assert numpy.array_equal(mask_points[4], numpy.array([30,55]))\n assert numpy.array_equal(mask_points[5], numpy.array([20,55]))\n assert numpy.array_equal(mask_points[6], numpy.array([10,55]))\n assert numpy.array_equal(mask_points[7], numpy.array([15,40]))\n assert numpy.array_equal(mask_points[8], numpy.array([15,30]))\n\n # check points that should be inside or outside the mask\n assert not mask[0,0]\n assert not mask[9,15]\n assert not mask[99, 99]\n assert mask[20, 16]\n assert mask[30, 30]\n assert mask[16, 40]\n \n\ndef opencv_available(test):\n \"\"\"Decorator for detecting if OpenCV/Python bindings are available\"\"\"\n from nose.plugins.skip import SkipTest\n\n @functools.wraps(test)\n def wrapper(*args, **kwargs):\n try:\n import cv2\n return test(*args, **kwargs)\n except ImportError:\n raise SkipTest(\"The cv2 module is not available\")\n\n return wrapper\n\n\n@opencv_available\ndef test_gftt():\n \"\"\"\n Tests the 
good features to track\n \"\"\"\n # white square on a black background\n image = numpy.zeros((3, 100, 100), dtype='uint8')\n image[:, 20:80, 20:80] = 255\n \n from bob.rppg.cvpr14.extract_utils import get_good_features_to_track \n corners = get_good_features_to_track(image, 4)\n assert numpy.array_equal(corners[0][0], numpy.array([79.0,79.0])), \"1st corner\"\n assert numpy.array_equal(corners[1][0], numpy.array([20.0,79.0])), \"2nd corner\"\n assert numpy.array_equal(corners[2][0], numpy.array([79.0,20.0])), \"3rd corner\"\n assert numpy.array_equal(corners[3][0], numpy.array([20.0,20.0])), \"4th corner\"\n\n\n@opencv_available\ndef test_track_features():\n \"\"\"\n Tests the track features functions\n \"\"\"\n\n # white square on a black background\n image1 = numpy.zeros((3, 100, 100), dtype='uint8')\n image1[:, 20:80, 20:80] = 255\n \n # white square on a black background - shifted by one pixel\n image2 = numpy.zeros((3, 100, 100), dtype='uint8')\n image2[:, 21:81, 21:81] = 255\n from bob.rppg.cvpr14.extract_utils import get_good_features_to_track \n points1 = get_good_features_to_track(image1, 4)\n from bob.rppg.cvpr14.extract_utils import track_features\n points2 = track_features(image1, image2, points1)\n points2 = numpy.rint(points2)\n assert numpy.array_equal(points2[0][0], numpy.array([80,80])), \"1st corner\"\n assert numpy.array_equal(points2[1][0], numpy.array([21,80])), \"2nd corner\"\n assert numpy.array_equal(points2[2][0], numpy.array([80,21])), \"3rd corner\"\n assert numpy.array_equal(points2[3][0], numpy.array([21,21])), \"4th corner\"\n \n\n@opencv_available\ndef test_find_transformation():\n \"\"\"\n Test the function to find the homographic transformation\n \"\"\"\n points1 = numpy.zeros((8,1, 2), dtype='int')\n points1[1, 0, :] = [1, 1] \n points1[2, 0, :] = [0, 1] \n points1[3, 0, :] = [1, 0]\n points1[4, 0, :] = [2, 2]\n points1[5, 0, :] = [2, 3]\n points1[6, 0, :] = [4, 3]\n points1[7, 0, :] = [5, 3]\n points2 = numpy.copy(points1)\n\n from bob.rppg.cvpr14.extract_utils import find_transformation\n mat = find_transformation(points1, points2)\n\n assert numpy.abs(mat[0, 0] - 1.0) < 1e-14 \n assert numpy.abs(mat[1, 1] - 1.0) < 1e-14 \n assert numpy.abs(mat[0, 1]) < 1e-14 \n assert numpy.abs(mat[1, 0]) < 1e-14 \n assert numpy.abs(mat[0, 2]) < 1e-14 \n assert numpy.abs(mat[1, 2]) < 1e-14 \n\n\ndef test_compute_average_color():\n \"\"\"\n Test the mean color computation inside a pre-defined area\n \"\"\"\n image = numpy.zeros((3, 100, 100), dtype='uint8')\n mask = numpy.zeros((100, 100), dtype='bool')\n mask[20:80, 20:80] = True\n image[1, :, :] = 128\n \n from bob.rppg.cvpr14.extract_utils import compute_average_colors_mask\n mean_green = compute_average_colors_mask(image, mask)[1]\n assert mean_green == 128\n\ndef test_rectify_illumination():\n \"\"\"\n Test the illumination rectification\n \"\"\"\n signal = numpy.ones(100)\n target = numpy.ones(100)\n from bob.rppg.cvpr14.illum_utils import rectify_illumination\n\n # signal and target are equal -> output is zero\n output = rectify_illumination(signal, target, 1, 1)\n assert numpy.array_equal(output, numpy.zeros(100))\n\n\ndef test_build_segments():\n \"\"\"\n Test the build segment function\n \"\"\"\n signal = numpy.zeros(100)\n length = 10\n \n from bob.rppg.cvpr14.motion_utils import build_segments\n segments, end_index = build_segments(signal, length)\n assert segments.shape == (10, 10)\n assert end_index == 100\n\n length = 11\n segments, end_index = build_segments(signal, length)\n assert segments.shape == (9, 
11)\n assert end_index == 99\n\n\ndef test_prune_segments():\n \"\"\"\n Test the pruning of segments\n \"\"\"\n segments = numpy.random.randn(10, 10)\n segments[0] = numpy.random.randn(10) * 10.0\n segments[4] = numpy.random.randn(10) * 10.0\n \n from bob.rppg.cvpr14.motion_utils import prune_segments\n pruned, gaps, cut_index = prune_segments(segments, 2.0) \n\n # segments with high std should have been pruned\n assert pruned.shape == (8,10)\n \n # the first segment has been pruned, no gap should be accounted for\n assert not gaps[0]\n\n # the 5th segment has been pruned, a gap should be accounted for\n assert gaps[3]\n\n # two segments have been pruned\n assert len(cut_index) == 2\n # the first one\n assert cut_index[0] == (0, 10)\n # the fifth one\n assert cut_index[1] == (40, 50)\n\n\ndef test_build_final_signal():\n \"\"\"\n Test the building of the final signal\n \"\"\"\n segments = numpy.ones((10, 10))\n segments[4:] += 4\n gaps = [False] * 10\n gaps[4] = True\n\n from bob.rppg.cvpr14.motion_utils import build_final_signal\n signal = build_final_signal(segments, gaps)\n \n assert signal.shape[0] == 100\n assert numpy.array_equal(signal, numpy.ones(100))\n\ndef test_detrend():\n \"\"\"\n Test the detrend filter\n \"\"\"\n x = numpy.array(range(20))\n y = 2 + x\n \n # detrend of the signal\n # result should be more or less zero-mean and flat\n from bob.rppg.cvpr14.filter_utils import detrend\n filtered = detrend(y, 300)\n assert numpy.all(filtered < 1e-10)\n\ndef test_average():\n \"\"\"\n Test the average filter\n \"\"\"\n signal = numpy.random.randn(100)\n from bob.rppg.cvpr14.filter_utils import average\n filtered = average(signal, 1)\n # if the window is one, the signal should be unaltered\n assert numpy.array_equal(signal, filtered)\n \n signal = numpy.ones(100)\n filtered = average(signal, 5)\n # the signal is constant, so should be the result\n # after the window size has been reached\n assert filtered[0] == signal[0] / 5.0\n assert numpy.all(signal[5:] - filtered[5:] < 1e-15)\n filtered = average(signal, 17)\n assert filtered[0] == signal[0] / 17.0 \n assert numpy.all(signal[17:] - filtered[17:] < 1e-15)\n","repo_name":"bioidiap/bob.rppg.base","sub_path":"bob/rppg/cvpr14/script/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":7624,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"5"} +{"seq_id":"72342148633","text":"import functools\nimport queue\nimport re\nfrom typing import Dict, List, Union, Tuple, Iterable, Set\n\nfrom rnalysis.utils import parsing\n\n\nclass GOTerm:\n __slots__ = {'_id': 'GO ID', '_name': 'GO Term name',\n '_namespace': 'biological_process, cellular_component or molecular_function',\n '_level': \"GO Term's level in the DAG Tree\",\n 'relationships': 'direct parent relationships of the GO Term',\n 'children_relationships': 'direct children relationships of the GO Term'}\n\n def __init__(self):\n self._id: str = None\n self._name: str = None\n self._namespace: str = None\n self._level: int = None\n self.relationships: Dict[str, List[str]] = {'is_a': [], 'part_of': []}\n self.children_relationships: Dict[str, List[str]] = {'is_a': [], 'part_of': []}\n\n @classmethod\n def with_properties(cls, go_id: str, name: str, namespace: str, level: int):\n go_term = cls()\n go_term.set_id(go_id)\n go_term.set_name(name)\n go_term.set_namespace(namespace)\n go_term.set_level(level)\n return go_term\n\n @property\n def id(self) -> str:\n return self._id\n\n @property\n def name(self) -> str:\n return 
self._name\n\n @property\n def namespace(self) -> str:\n return self._namespace\n\n @property\n def level(self) -> int:\n return self._level\n\n def set_id(self, go_id: str):\n self._id = go_id\n\n def set_name(self, name: str):\n self._name = name\n\n def set_namespace(self, namespace: str):\n self._namespace = namespace\n\n def set_level(self, level: int):\n self._level = level\n\n @functools.lru_cache(maxsize=2)\n def get_parents(self, relationships: Union[str, tuple] = ('is_a', 'part_of')) -> List[str]:\n relationships_filt = [rel for rel in parsing.data_to_list(relationships) if rel in self.relationships]\n go_ids = [go_id for rel in relationships_filt for go_id in self.relationships[rel]]\n return go_ids\n\n @functools.lru_cache(maxsize=2)\n def get_children(self, relationships: Union[str, Tuple[str]] = ('is_a', 'part_of')) -> List[str]:\n relationships_filt = [rel for rel in parsing.data_to_list(relationships) if rel in self.children_relationships]\n go_ids = [go_id for rel in relationships_filt for go_id in self.children_relationships[rel]]\n return go_ids\n\n\ndef parse_go_id(sequence: str) -> str:\n return re.findall(\"GO:[0-9]{7}\", sequence)[0]\n\n\nclass DAGTree:\n __slots__ = {'data_version': 'version of the go-basic.obo file',\n 'go_terms': 'dictionary of GO Terms in the DAG Tree',\n 'alt_ids': 'mapping of alternative GO IDs to their main GO ID',\n 'namespaces': \"namespaces included in the DAGTree\",\n 'levels': 'list of levels in the DAG Tree',\n 'parent_relationship_types': 'the types of relationships that constitute parenthood in the DAG Tree',\n '_upper_induced_graphs': 'memoized upper-induced graphs'}\n\n def __init__(self, line_iterator: Iterable[str],\n parent_relationship_types: Union[str, Iterable[str]] = ('is_a', 'part_of')):\n self.data_version = None\n self.go_terms: Dict[str, GOTerm] = {}\n self.alt_ids: Dict[str, str] = {}\n self.namespaces: Set[str] = set()\n self.levels: List[dict] = []\n self.parent_relationship_types: tuple = parsing.data_to_tuple(parent_relationship_types)\n\n self._upper_induced_graphs: Dict[str, Set[str]] = {}\n\n self._parse_file(line_iterator)\n self._populate_levels()\n self._populate_children()\n\n def __getitem__(self, key) -> 'GOTerm':\n if key in self.go_terms:\n return self.go_terms[key]\n elif key in self.alt_ids:\n return self.go_terms[self.alt_ids[key]]\n raise KeyError(key)\n\n def __contains__(self, item):\n try:\n _ = self[item]\n return True\n except KeyError:\n return False\n\n def _parse_file(self, line_iterator: Iterable[str]):\n current_term = None\n in_frame = False\n for line in line_iterator:\n line = line.strip()\n if in_frame:\n if line.startswith('id: '):\n current_term.set_id(parse_go_id(line))\n elif line.startswith('namespace: '):\n current_term.set_namespace(line[11:])\n if current_term.namespace not in self.namespaces:\n self.namespaces.add(current_term.namespace)\n elif line.startswith('name: '):\n current_term.set_name(line[6:])\n elif line.startswith('alt_id: '):\n self.alt_ids[parse_go_id(line)] = current_term.id\n elif line.startswith('is_a: '):\n current_term.relationships['is_a'].append(parse_go_id(line))\n elif line.startswith('relationship: '):\n relationship_type = line.split(' ')[1]\n if relationship_type not in current_term.relationships:\n current_term.relationships[relationship_type] = []\n current_term.relationships[relationship_type].append(parse_go_id(line))\n elif line.startswith('is_obsolete: true'):\n in_frame = False\n elif line == '':\n self.go_terms[current_term.id] = current_term\n
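# the blank line ends the current [Term] stanza of the OBO file: the finished term was stored above, and the parser now leaves the frame until the next [Term] header\n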
in_frame = False\n else:\n if line.startswith('[Term]'):\n current_term = GOTerm()\n in_frame = True\n elif line.startswith('data-version:'):\n self.data_version = line[14:]\n\n if in_frame: # add last go term to the set, if it was not already added\n self.go_terms[current_term.id] = current_term\n\n def _populate_levels(self):\n levels_dict = {}\n for go_term in self.go_terms.values():\n if go_term.level is None:\n go_term.set_level(self._get_term_level_rec(go_term))\n if go_term.level not in levels_dict:\n levels_dict[go_term.level] = {}\n levels_dict[go_term.level][go_term.id] = go_term\n if len(levels_dict) == 0:\n self.levels = [{}]\n else:\n self.levels = [levels_dict[i] for i in range(0, max(levels_dict.keys()) + 1)]\n\n def _get_term_level_rec(self, go_term: GOTerm):\n if go_term.level is not None:\n pass\n elif len(go_term.get_parents(self.parent_relationship_types)) == 0:\n go_term.set_level(0)\n else:\n go_term.set_level(1 + max([self._get_term_level_rec(self[parent_id]) for parent_id in\n go_term.get_parents(self.parent_relationship_types)]))\n return go_term.level\n\n def _populate_children(self):\n for go_id in self.level_iter():\n for rel_type in self.parent_relationship_types:\n for parent_id in self[go_id].get_parents(rel_type):\n if rel_type not in self[parent_id].children_relationships:\n self[parent_id].children_relationships[rel_type] = []\n self[parent_id].children_relationships[rel_type].append(go_id)\n\n def level_iter(self, namespace: str = 'all'):\n if namespace == 'all':\n for level in self.levels[::-1]:\n for go_id in level:\n yield go_id\n else:\n for level in self.levels[::-1]:\n for go_id in level:\n if self[go_id].namespace == namespace:\n yield go_id\n\n def upper_induced_graph_iter(self, go_id: str):\n if go_id in self._upper_induced_graphs:\n for upper_induced_node in self._upper_induced_graphs[go_id]:\n yield upper_induced_node\n\n else:\n # put go_id's parents into the queue\n node_queue = queue.SimpleQueue()\n processed_nodes = set()\n parents = self[go_id].get_parents(self.parent_relationship_types)\n for parent in parents:\n node_queue.put(parent)\n processed_nodes.update(parents)\n # iterate over the queue until it is empty (meaning we reached the top of the graph)\n while not node_queue.empty():\n this_node = node_queue.get()\n yield this_node\n # if this_node's upper-induced graph was already calculated, yield those unprocessed nodes\n if this_node in self._upper_induced_graphs:\n for upper_induced_node in self._upper_induced_graphs[this_node]:\n if upper_induced_node not in processed_nodes:\n yield upper_induced_node\n processed_nodes.update(self._upper_induced_graphs[this_node])\n # if this_node's upper-induced graph was yet to be calculated, add its unprocessed parents to the queue\n else:\n parents = self[this_node].get_parents(self.parent_relationship_types)\n for parent in parents:\n if parent not in processed_nodes:\n node_queue.put(parent)\n processed_nodes.update(parents)\n # memoize the function's output for go_id\n self._upper_induced_graphs[go_id] = processed_nodes\n","repo_name":"ilreeves/RNAlysis","sub_path":"rnalysis/utils/ontology.py","file_name":"ontology.py","file_ext":"py","file_size_in_byte":9557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"28158130253","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('partnerji', 
'0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='NastavitevPartnerja',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),\n ('partner', models.OneToOneField(to='partnerji.Partner')),\n ],\n options={\n 'verbose_name_plural': 'nastavitve',\n 'verbose_name': 'nastavitev',\n },\n ),\n ]\n","repo_name":"vasjapavlovic/eda5","sub_path":"eda5/nastavitve/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"41791357101","text":"# Python 3\nimport sys\n\ntest_cases = open(sys.argv[1], 'r')\nfor test in test_cases:\n if test == '':\n continue\n\n set1 = set(test.strip().split(';')[0].split(','))\n set2 = set(test.strip().split(';')[1].split(','))\n\n union_set = set1 & set2\n #print(union_set)\n \n union_list = list(union_set)\n union_list.sort()\n if len(union_list) != 0:\n print(','.join(c for c in union_list))\n \ntest_cases.close()","repo_name":"Phyllostachys/Katas","sub_path":"CodeEval/easy/set_intersection.py","file_name":"set_intersection.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38343471032","text":" \nfrom cgitb import grey\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimage_RGB=Image.open('lena24'+'.bmp')\nimage_RGB.show()\nimage_gris=image_RGB.convert('L')\nimage_gris.show()\narray_RGB=np.array(image_RGB)\nprint(np.shape(array_RGB))\narray_gris=np.array(image_gris)\nprint(np.shape(array_gris))\n\n\"\"\"\nplt.figure(1)\nplt.imshow(array_RGB)\nplt.title('image rgb 2')\nplt.show()\n\nplt.figure(2)\nplt.imshow(array_gris,cmap=\"gray\")\nplt.title('image gray 2')\nplt.show()\n\"\"\"\narray1_RGB=np.copy(array_RGB)\n\n#print(array_RGB[0,0,:])\n\nfor i in range(512) : array1_RGB[i,i,:] = [255,255,255]\n\n\"\"\"\"\nplt.figure(1)\nplt.imshow(array1_RGB)\nplt.title('image rgb 2')\nplt.show()\n\"\"\"\n\n\narray1_RGB[:,:,[0,1]] =0\n\n\nplt.figure(1)\nplt.imshow(array1_RGB)\nplt.title('image rgb 3')\nplt.show()","repo_name":"mehdi3199/tp_info","sub_path":"tp_py/tp2.py","file_name":"tp2.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"70075320792","text":"from django.urls import path\nfrom django.contrib.auth import get_user_model\nfrom django.utils.translation import gettext_lazy as _\nfrom rest_framework import serializers, exceptions\nfrom rest_framework_simplejwt import views as jwt_views, serializers as jwt\nfrom two_factor.utils import default_device\n\nimport karrio.server.openapi as openapi\n\nENDPOINT_ID = \"&&\" # This endpoint id is used to make operation ids unique make sure not to duplicate\nUser = get_user_model()\n\n\nclass AccessToken(serializers.Serializer):\n access = serializers.CharField()\n\n\nclass TokenPair(AccessToken):\n refresh = serializers.CharField()\n\n\nclass TokenObtainPairSerializer(jwt.TokenObtainPairSerializer):\n @classmethod\n def get_token(cls, user):\n token = super().get_token(user)\n\n # Set is_verified to False if the user has Two Factor enabled and confirmed\n token[\"is_verified\"] = False if default_device(user) else True\n\n return token\n\n def validate(self, attrs):\n data = super().validate(attrs)\n\n refresh = self.get_token(self.user)\n\n data[\"refresh\"] = str(refresh)\n 
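# simplejwt derives the access token from the refresh token's payload, so the custom is_verified claim set in get_token() carries over to it\n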
data[\"access\"] = str(refresh.access_token)\n\n if jwt.api_settings.UPDATE_LAST_LOGIN:\n jwt.update_last_login(None, self.user)\n\n # Factor is enabled if False, so we need to send the token to the user\n if not refresh[\"is_verified\"]:\n default_device(self.user).generate_challenge()\n\n return data\n\n\nclass TokenRefreshSerializer(jwt.TokenRefreshSerializer):\n def validate(self, attrs: dict):\n refresh = jwt.RefreshToken(attrs[\"refresh\"])\n\n if not refresh[\"is_verified\"]:\n raise exceptions.AuthenticationFailed(\n {\"refresh\": _(\"This refresh token is not verified.\")},\n code=\"unverified_refresh_token\",\n )\n\n data = {\"access\": str(refresh.access_token), \"refresh\": str(refresh)}\n\n if jwt.api_settings.ROTATE_REFRESH_TOKENS:\n if jwt.api_settings.BLACKLIST_AFTER_ROTATION:\n try:\n # Attempt to blacklist the given refresh token\n refresh.blacklist()\n except AttributeError:\n # If blacklist app not installed, `blacklist` method will\n # not be present\n pass\n\n refresh.set_jti()\n refresh.set_exp()\n\n data[\"refresh\"] = str(refresh)\n\n return data\n\n\nclass VerifiedTokenObtainPairSerializer(jwt.TokenRefreshSerializer):\n otp_token = serializers.CharField(\n required=True,\n help_text=\"\"\"The OTP (One Time Password) token received by the user from the\n configured Two Factor Authentication method.\n \"\"\",\n )\n\n def validate(self, attrs):\n refresh = self.token_class(attrs[\"refresh\"])\n user = User.objects.get(id=refresh[\"user_id\"])\n refresh[\"is_verified\"] = self._validate_otp(attrs[\"otp_token\"], user)\n\n data = {\"access\": str(refresh.access_token), \"refresh\": str(refresh)}\n\n if jwt.api_settings.ROTATE_REFRESH_TOKENS:\n if jwt.api_settings.BLACKLIST_AFTER_ROTATION:\n try:\n # Attempt to blacklist the given refresh token\n refresh.blacklist()\n except AttributeError:\n # If blacklist app not installed, `blacklist` method will\n # not be present\n pass\n\n refresh.set_jti()\n refresh.set_exp()\n refresh.set_iat()\n\n data[\"refresh\"] = str(refresh)\n\n return data\n\n def _validate_otp(self, otp_token, user) -> bool:\n device = default_device(user)\n if device is None:\n raise exceptions.ValidationError(\n _(\"Two Factor authentication is not enabled for this user\"),\n code=\"otp_invalid\",\n )\n\n if device.verify_token(otp_token):\n return True\n\n raise exceptions.ValidationError(\n {\"otp_token\": _(\"Invalid or Expired OTP token\")}, code=\"otp_invalid\"\n )\n\n\nclass TokenObtainPair(jwt_views.TokenObtainPairView):\n serializer_class = TokenObtainPairSerializer\n\n @openapi.extend_schema(\n auth=[],\n tags=[\"API\"],\n operation_id=f\"{ENDPOINT_ID}authenticate\",\n summary=\"Obtain auth token pair\",\n description=\"Authenticate the user and return a token pair\",\n responses={201: TokenPair()},\n )\n def post(self, *args, **kwargs):\n response = super().post(*args, **kwargs)\n response[\"Cache-Control\"] = \"no-store\"\n response[\"CDN-Cache-Control\"] = \"no-store\"\n return response\n\n\nclass TokenRefresh(jwt_views.TokenRefreshView):\n serializer_class = TokenRefreshSerializer\n\n @openapi.extend_schema(\n auth=[],\n tags=[\"API\"],\n operation_id=f\"{ENDPOINT_ID}refresh_token\",\n summary=\"Refresh auth token\",\n description=\"Authenticate the user and return a token pair\",\n responses={201: TokenPair()},\n )\n def post(self, *args, **kwargs):\n response = super().post(*args, **kwargs)\n response[\"Cache-Control\"] = \"no-store\"\n response[\"CDN-Cache-Control\"] = \"no-store\"\n return response\n\n\nclass 
TokenVerify(jwt_views.TokenVerifyView):\n @openapi.extend_schema(\n auth=[],\n tags=[\"API\"],\n operation_id=f\"{ENDPOINT_ID}verify_token\",\n summary=\"Verify token\",\n description=\"Verify an existent authentication token\",\n responses={200: openapi.OpenApiTypes.OBJECT},\n )\n def post(self, *args, **kwargs):\n response = super().post(*args, **kwargs)\n response[\"Cache-Control\"] = \"no-store\"\n response[\"CDN-Cache-Control\"] = \"no-store\"\n return response\n\n\nclass VerifiedTokenPair(jwt_views.TokenVerifyView):\n serializer_class = VerifiedTokenObtainPairSerializer\n\n @openapi.extend_schema(\n auth=[],\n tags=[\"API\"],\n operation_id=f\"{ENDPOINT_ID}get_verified_token\",\n summary=\"Get verified JWT token\",\n description=\"Get a verified JWT token pair by submitting a Two-Factor authentication code.\",\n responses={201: TokenPair()},\n )\n def post(self, *args, **kwargs):\n response = super().post(*args, **kwargs)\n response[\"Cache-Control\"] = \"no-store\"\n response[\"CDN-Cache-Control\"] = \"no-store\"\n return response\n\n\nurlpatterns = [\n path(\"api/token\", TokenObtainPair.as_view(), name=\"jwt-obtain-pair\"),\n path(\"api/token/refresh\", TokenRefresh.as_view(), name=\"jwt-refresh\"),\n path(\"api/token/verify\", TokenVerify.as_view(), name=\"jwt-verify\"),\n path(\"api/token/verified\", VerifiedTokenPair.as_view(), name=\"verified-jwt-pair\"),\n]\n","repo_name":"karrioapi/karrio","sub_path":"server/main/karrio/server/urls/jwt.py","file_name":"jwt.py","file_ext":"py","file_size_in_byte":6761,"program_lang":"python","lang":"en","doc_type":"code","stars":323,"dataset":"github-code","pt":"5"} +{"seq_id":"7253459779","text":"from abc import ABC, abstractmethod\n\nfrom .runners import *\n\nclass RunnerFactory(ABC):\n def __init__(self, config_adaptor=lambda x: x, name=None) -> None:\n self.config_adaptor = config_adaptor\n self.name = name\n\n @abstractmethod\n def make(self, name, builder, config, i_run):\n pass\n\n\nclass ShellRunnerFactory(RunnerFactory):\n def __init__(self, run_cmd, run_cwd=None, stdout_termination_token=None, error_token=\"[ERROR]\", config_adaptor=lambda x: x, set_stop_event=False, name=None, environment={}) -> None:\n super().__init__(config_adaptor, name = name)\n self.run_cmd = run_cmd\n self.run_cwd = run_cwd\n self.stdout_termination_token = stdout_termination_token\n self.error_token = error_token\n self.set_stop_event = set_stop_event\n self.environment = environment\n\n def make(self, name, builder, config, i_run):\n if self.name:\n name = self.name # user-defined has higher priority\n return ShellRunner(name, i_run, self.run_cmd, self.run_cwd, self.stdout_termination_token, self.error_token, self.config_adaptor(config), self.set_stop_event, self.environment)\n\nclass DockerRunnerFactory(RunnerFactory):\n def __init__(self, run_cmd, stdout_termination_token=None, error_token=\"[ERROR]\", config_adaptor=lambda x: x, set_stop_event=False, name=None) -> None:\n super().__init__(config_adaptor, name = name)\n self.run_cmd = run_cmd\n self.stdout_termination_token = stdout_termination_token\n self.error_token = error_token\n self.set_stop_event = set_stop_event\n\n def make(self, name, builder, config, i_run):\n if self.name:\n name = self.name # user-defined has higher priority\n return DockerRunner(name, i_run, builder.image_name, self.run_cmd, self.stdout_termination_token, self.error_token, self.config_adaptor(config), self.set_stop_event)\n\nclass MultiShellRunnerFactory(RunnerFactory):\n # factories order by start order\n def 
__init__(self, factories, config_adaptor=lambda x: x, name=None):\n super().__init__(config_adaptor, name=name)\n self.factories = factories\n\n def make(self, name, builder, config, i_run):\n if self.name:\n name = self.name # user-defined has higher priority\n config = self.config_adaptor(config)\n runners = [factory.make(name, builder, config, i_run) for factory in self.factories]\n return OrderedMultiShellRunner(name, i_run, runners, config)","repo_name":"severus21/Vardac","sub_path":"benchmarks/src/factories.py","file_name":"factories.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"2750744103","text":"from xlrd import sheet\nfrom xlrd import open_workbook\nfrom xlrd import *\nimport logging\nimport datetime\nimport os\nimport xlrd\n\n\ndef getRowcount(FileName, Sheetname):\n rc=0\n try:\n wb=xlrd.open_workbook(FileName)\n sheet=wb.sheet_by_name(Sheetname)\n rc=sheet.nrows\n return rc\n except Exception as e:\n print(\"unable to find the workbook\")\n\ndef getColumnCount(FileName, Sheetname):\n cc=0\n try:\n wb=xlrd.open_workbook(FileName)\n sheet=wb.sheet_by_name(Sheetname)\n cc=sheet.ncols\n return cc\n except Exception as e:\n print(\"unable to get the column count\")\n\n\ndef getCelldata(FileName, Sheetname,ColName, RowNum):\n ColNum=0\n try:\n wb=xlrd.open_workbook(FileName)\n sheet=wb.sheet_by_name(Sheetname)\n\n firstRow=sheet.row_values(0)\n for data in firstRow:\n if data==ColName:\n break\n ColNum+=1\n\n celldata=sheet.cell_value(RowNum,ColNum)\n return celldata\n except Exception as e:\n print(\"unable to fetch the data from the cell\")\n\n\ndef loadOSenvVariables(FileName,Sheetname):\n try:\n wb=xlrd.open_workbook(FileName)\n sheet=wb.sheet_by_name(Sheetname)\n\n ColoumnNames=sheet.row_values(0)\n ColoumnValues=sheet.row_values(1)\n\n for i in range(len(ColoumnNames)):\n os.environ[ColoumnNames[i]]=str(ColoumnValues[i])\n except Exception as e:\n print(\"Enter a valid FileName and SheetName\")\n\n\n# '''\n# Write the Log error\n# '''\n# def writelog(message, loglevel):\ndef getDateTime():\n strDate=datetime.datetime.now()\n return str(strDate)\n\n\ndef writeLog(message, loglevel):\n if(loglevel.lower()==\"info\"):\n logging.basicConfig(filename='C:/Users/sjanagonnavar/PycharmProjects/MYFW/logs/AutomationLogs.log',level=logging.INFO)\n logging.info(message)\n elif(loglevel.lower()==\"error\"):\n logging.basicConfig(filename='C:/Users/sjanagonnavar/PycharmProjects/MYFW/logs/AutomationLogs.log',level=logging.ERROR)\n logging.error(message)\n elif(loglevel.lower()==\"debug\"):\n logging.basicConfig(filename='C:/Users/sjanagonnavar/PycharmProjects/MYFW/logs/AutomationLogs.log',level=logging.DEBUG)\n logging.debug(message)\n else:\n logging.error(\"Invalid log level message !!!!!!!!!\")\n\n\n\n\n\n\n\n\n","repo_name":"siddeshvinay/DEC_END","sub_path":"DataTable/Datatable.py","file_name":"Datatable.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"2036272124","text":"import struct\nfrom enum import IntEnum\n\nimport bitstruct\n\nfrom fit import base_type\nfrom fit.exceptions import FitException\nfrom fit.field import Field, FieldDefinition, DevFieldDefinition\n\n\ndef swap16(value):\n return struct.unpack('>H', struct.pack('<H', value))[0]\n\n return struct.pack(pack_format, 0, self.architecture, self.global_msg_num, self.num_fields)\n\n\nclass DefinitionMessage(Record):\n
\"\"\"The definition message is used to create an association between the local\n message type contained in the record header, and a Global Message Number\n (mesg_num) that relates to the global FIT message.\n \"\"\"\n\n def __init__(self, local_msg_type: int, is_little_endian: bool, global_msg_num: int, field_defs, dev_field_defs):\n \"\"\"\n\n local_msg_type: The Local Message Type is used to create an association between the definition message, data\n message and the FIT message in the Global FIT Profile (global_msg_num). Value is 0-15\n \"\"\"\n\n super().__init__()\n self.is_little_endian = is_little_endian\n self.global_msg_num = global_msg_num\n self.field_defs = field_defs\n self.dev_field_defs = dev_field_defs\n\n has_dev_data = True if dev_field_defs else False\n\n self.header = RecordHeader(RecordHeaderType.NORMAL,\n RecordMessageType.DEFINITION,\n local_msg_type,\n has_dev_data)\n self.field_classes = []\n self.field_lengths = []\n\n def has_dev_data(self):\n return True if self.dev_field_defs else False\n\n def get_local_message_type(self):\n return self.header.local_msg_type\n\n def build_field_classes(self, profile, dev_profile):\n global_msg = profile.get_message_by_id(self.global_msg_num)\n if not global_msg:\n print('WARNING: Global message {} not found in profile'.format(\n self.global_msg_num))\n\n # includes both standard fields and developer fields\n self.field_classes = []\n self.field_lengths = []\n\n for field_def in self.field_defs:\n field_class = global_msg.get_field_by_id(\n field_def.field_num) if global_msg else None\n\n base_type_ = base_type.get_by_id(field_def.base_type_num)\n\n if field_class and field_class.base_type != base_type_:\n # print(\n # 'WARNING: field definition is overriding the base type of this field')\n field_class = Field.new(field_class.def_num,\n field_class.name,\n base_type_,\n scale=field_class.scale,\n offset=field_class.offset,\n units=field_class.units,\n subfields=field_class.subfields)\n\n if not field_class:\n print('WARNING: Field not found in profile, msg: {}, field: {}'.format(self.global_msg_num,\n field_def.field_num))\n\n field_class = Field.new(field_def.field_num,\n 'unknown_{}'.format(\n field_def.field_num),\n base_type_)\n\n field_length = field_class.get_length_from_size(\n field_def.field_size)\n self.field_lengths.append(field_length)\n self.field_classes.append(field_class)\n\n for dev_field_def in self.dev_field_defs:\n dev_msg = dev_profile.get_message_by_id(\n dev_field_def.dev_data_index)\n\n if not dev_msg:\n print(f'WARNING: Dev message {dev_msg} not found in profile')\n\n field_class = dev_msg.get_field_by_id(\n dev_field_def.field_num) if dev_msg else None\n\n field_length = field_class.get_length_from_size(\n dev_field_def.field_size)\n\n self.field_lengths.append(field_length)\n self.field_classes.append(field_class)\n\n def to_bytes(self):\n bbuf = self.header.to_bytes()\n\n architecture = Architecture.LITTLE_ENDIAN if self.is_little_endian else Architecture.BIG_ENDIAN\n preamble = DefinitionMessagePreamble(\n architecture, self.global_msg_num, len(self.field_defs))\n bbuf += preamble.to_bytes()\n\n for field_def in self.field_defs:\n bbuf += field_def.to_bytes()\n\n dev_bytes_count = 0\n if self.dev_field_defs:\n bbuf += struct.pack('B', len(self.dev_field_defs))\n dev_bytes_count += 1\n for dev_field_def in self.dev_field_defs:\n bbuf += dev_field_def.to_bytes()\n dev_bytes_count += len(dev_field_def.to_bytes())\n return bbuf\n\n def to_row(self, profile):\n global_msg = 
self.get_global_message(profile)\n message_name = global_msg.name if global_msg else 'unknown'\n\n row = [self.header.msg_type.name, self.header.local_msg_type, message_name]\n\n for field_def in self.field_defs:\n row.extend(\n [field_def.field_num, field_def.field_size, base_type.get_by_id(field_def.base_type_num).name])\n\n for field_def in self.dev_field_defs:\n row.extend(\n [field_def.field_num, field_def.field_size, field_def.dev_data_index])\n\n return row\n\n def get_global_message_id(self):\n return self.global_msg_num\n\n @classmethod\n def from_file(cls, fit_file, header=None):\n\n if not header:\n header = RecordHeader.from_file(fit_file)\n\n preamble = DefinitionMessagePreamble.from_file(fit_file)\n\n field_defs = []\n for _ in range(0, preamble.num_fields):\n field_def = FieldDefinition.from_file(fit_file)\n field_defs.append(field_def)\n\n num_dev_fields = struct.unpack_from(\n 'B', fit_file.read(1))[0] if header.has_dev_data else 0\n\n dev_field_defs = []\n for _ in range(0, num_dev_fields):\n dev_field_def = DevFieldDefinition.from_file(fit_file)\n dev_field_defs.append(dev_field_def)\n\n is_little_endian = preamble.architecture == Architecture.LITTLE_ENDIAN\n\n def_msg = cls(header.local_msg_type, is_little_endian, preamble.global_msg_num,\n field_defs, dev_field_defs)\n\n def_msg.build_field_classes(fit_file.profile, fit_file.dev_profile)\n\n return def_msg\n\n @classmethod\n def from_payload(cls, header: RecordHeader, bytes_buffer, offset=0):\n # header = RecordHeader.from_bytes(bytes_buffer, offset)\n # offset += header.size()\n\n preamble = DefinitionMessagePreamble.from_bytes(bytes_buffer, offset)\n offset += preamble.size()\n\n field_defs = []\n for _ in range(0, preamble.num_fields):\n field_def = FieldDefinition.from_bytes(bytes_buffer, offset)\n field_defs.append(field_def)\n offset += field_def.size()\n\n dev_field_defs = []\n if header.has_dev_data:\n\n pack_fmt = 'B'\n num_dev_fields = struct.unpack_from(pack_fmt, bytes_buffer, offset)[0]\n offset += struct.calcsize(pack_fmt)\n\n for _ in range(0, num_dev_fields):\n dev_field_def = DevFieldDefinition.from_bytes(bytes_buffer,\n offset)\n offset += dev_field_def.size()\n dev_field_defs.append(dev_field_def)\n\n is_little_endian = preamble.architecture == Architecture.LITTLE_ENDIAN\n\n return cls(header.local_msg_type, is_little_endian, preamble.global_msg_num,\n field_defs, dev_field_defs)\n\n def size(self):\n size = RecordHeader.size() + DefinitionMessagePreamble.size() + \\\n len(self.field_defs) * FieldDefinition.size()\n\n if self.header.has_dev_data:\n size += 1 + len(self.dev_field_defs) * DevFieldDefinition.size()\n\n return size\n\n def __eq__(self, other):\n if len(self.field_defs) != len(other.field_defs):\n return False\n\n if self.header != other.header:\n return False\n\n if self.is_little_endian != other.is_little_endian:\n return False\n\n if self.global_msg_num != other.global_msg_num:\n return False\n\n return True\n\n\nclass DataMessage(Record):\n\n def __init__(self, msg_def, fields):\n super().__init__()\n self.header = RecordHeader(RecordHeaderType.NORMAL, RecordMessageType.DATA,\n local_msg_type=msg_def.get_local_message_type())\n\n self.msg_def = msg_def\n self.fields = fields\n\n def to_bytes(self):\n buffer = self.header.to_bytes()\n for field, field_def in zip(self.fields, self.msg_def.field_defs + self.msg_def.dev_field_defs):\n length = field.get_length_from_size(field_def.field_size)\n buffer += field.to_bytes(self.msg_def.is_little_endian, length=length)\n\n return buffer\n\n def 
to_row(self, profile):\n global_msg = self.get_global_message(profile)\n message_name = global_msg.name if global_msg else 'unknown'\n\n row = [self.header.msg_type.name, self.header.local_msg_type, message_name]\n for field in self.fields:\n row.extend(\n [field.get_name(), field.get_value(), field.get_units()])\n\n return row\n\n def get_global_message_id(self):\n return self.msg_def.global_msg_num\n\n def get_field_by_name(self, name):\n for field in self.fields:\n if field.name == name:\n return field\n return None\n\n @classmethod\n def from_file(cls, fit_file, header=None):\n if not header:\n header = RecordHeader.from_file(fit_file)\n\n msg_def = fit_file.get_msg_def(header.local_msg_type)\n\n if not msg_def:\n message = 'Message not defined for local message {} .'.format(header.local_msg_type)\n raise FitException(message)\n\n fields = []\n for field_length, field_class in zip(msg_def.field_lengths, msg_def.field_classes):\n bytes_buffer = fit_file.read(field_class.size(field_length))\n field = field_class.from_bytes(msg_def.is_little_endian,\n bytes_buffer, length=field_length)\n fields.append(field)\n\n # Handle dynamic fields that are dependent on the value of another field\n # in the message\n for field in fields:\n for subfield in field.subfields:\n for ref_field in fields:\n if subfield.ref_field_names[0] == ref_field.name and \\\n subfield.ref_field_values[0] == ref_field.value:\n field.subfield = subfield.from_bytes(msg_def.is_little_endian,\n field.bbuffer[0:subfield.base_type.size()])\n break\n\n # todo: class determination\n MSG_WORKOUT_STEP = 27\n if msg_def.global_msg_num == MSG_WORKOUT_STEP:\n from fit.profile.workout_step_message import WorkoutStepMessage\n return WorkoutStepMessage(msg_def, fields)\n\n return cls(msg_def, fields)\n\n @classmethod\n def from_payload(cls, header: RecordHeader, msg_def: DefinitionMessage, buffer, offset: int = 0) -> 'DataMessage':\n \"\"\"Read a date record from a buffer. 
Record header has already been read.\n \"\"\"\n fields = []\n for field_length, field_class in zip(msg_def.field_lengths, msg_def.field_classes):\n field = field_class.from_bytes(msg_def.is_little_endian,\n buffer, offset=offset,\n length=field_length)\n offset += field.size(field_length)\n fields.append(field)\n\n # Handle dynamic fields that are dependent on the value of another field\n # in the message\n for field in fields:\n for subfield in field.subfields:\n for ref_field in fields:\n if subfield.ref_field_names[0] == ref_field.name and \\\n subfield.ref_field_values[0] == ref_field.value:\n field.subfield = subfield.from_bytes(msg_def.is_little_endian,\n field.bbuffer[:subfield.base_type.size()])\n break\n\n return cls(msg_def, fields)\n\n def size(self):\n size = RecordHeader.size() + \\\n sum([field_def.field_size for field_def in self.msg_def.field_defs]) + \\\n sum([dev_field_def.field_size\n for dev_field_def in self.msg_def.dev_field_defs])\n\n return size\n\n def __eq__(self, other):\n if self.header != other.header:\n return False\n\n if self.msg_def != other.msg_def:\n return False\n\n for field1, field2 in zip(self.fields, other.fields):\n if field1 != field2:\n return False\n\n return True\n","repo_name":"greensopinion/fit_tool","sub_path":"scripts/fit/record.py","file_name":"record.py","file_ext":"py","file_size_in_byte":18879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"10939994585","text":"import argparse\nimport torch\nimport pandas as pd\nfrom datasets import Audio, Dataset\nfrom transformers import pipeline\nfrom transformers.pipelines.pt_utils import KeyDataset\nfrom sentence_segment import SyllableSegmentation\nfrom utils import convert_mp4_to_wav, perform_vad, generate_srt, burn_srt_to_video\nfrom pydub import AudioSegment\n\n\ndef convert_audio_to_wav(audio_file, target_sr):\n audio = AudioSegment.from_file(audio_file)\n audio = audio.set_frame_rate(target_sr).set_channels(1)\n output_wav_file = audio_file.rsplit('.', 1)[0] + \"_converted.wav\"\n audio.export(output_wav_file, format=\"wav\")\n return output_wav_file\n\ndef main(args):\n SAMPLING_RATE = 16000\n\n # Do ASR\n device = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n pipe = pipeline(\n \"automatic-speech-recognition\",\n model=args.model_path,\n chunk_length_s=30,\n device=device,\n torch_dtype=torch.float16,\n )\n \n if args.input_file.endswith('.mp4'):\n wav_file = convert_mp4_to_wav(args.input_file)\n elif args.input_file.endswith('.wav'):\n # Check sampling rate and convert if necessary\n audio = AudioSegment.from_wav(args.input_file)\n if audio.frame_rate != SAMPLING_RATE:\n wav_file = convert_audio_to_wav(args.input_file, SAMPLING_RATE)\n else:\n wav_file = args.input_file\n else: # Assuming other audio formats such as .mp3, etc.\n wav_file = convert_audio_to_wav(args.input_file, SAMPLING_RATE)\n\n _, chunklist = perform_vad(wav_file, 'temp_directory_for_chunks')\n \n # for faster inference, create dataset\n audio_dataset = Dataset.from_dict({\"audio\": [c[\"fname\"] for c in chunklist]}).cast_column(\"audio\", Audio())\n\n prediction_gen = pipe(\n KeyDataset(audio_dataset, \"audio\"),\n generate_kwargs={\"task\": \"transcribe\", \"language\": \"Thai\"},\n return_timestamps=False,\n batch_size=4,\n ignore_warning=True,\n )\n\n predictions = [out for out in prediction_gen]\n\n vad_transcriptions = {\"start\": [], \"end\": [], \"prediction\": []}\n\n for vad_chunk, pred in zip(chunklist, predictions):\n start_in_samples, 
end_in_samples = vad_chunk[\"start\"], vad_chunk[\"end\"]\n start_in_s = start_in_samples / (SAMPLING_RATE)\n end_in_s = end_in_samples / (SAMPLING_RATE)\n\n vad_transcriptions[\"prediction\"].append(pred[\"text\"])\n vad_transcriptions[\"start\"].append(start_in_s)\n vad_transcriptions[\"end\"].append(end_in_s)\n\n ss = SyllableSegmentation()\n uncorrected_segments = ss(vad_transcriptions=vad_transcriptions, segment_duration=4.0)\n\n if args.output_format == 'csv':\n df = pd.DataFrame(uncorrected_segments)\n df.to_csv(args.output_file, index=False)\n elif args.output_format == 'srt':\n generate_srt(uncorrected_segments, args.output_file)\n if args.burn_srt:\n burn_srt_to_video(args.input_file, args.output_file)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"ASR pipeline with options\")\n parser.add_argument(\"--input_file\", required=True, help=\"Input video or audio file path\")\n parser.add_argument(\"--output_file\", required=True, help=\"Output file path (CSV or SRT based on the format specified)\")\n parser.add_argument(\"--model_path\", default='/path/to/default/model', help=\"Path to the whisper model\")\n parser.add_argument(\"--output_format\", choices=['csv', 'srt'], default='csv', help=\"Output format, either csv or srt\")\n parser.add_argument(\"--burn_srt\", action='store_true', help=\"Option to burn the srt to the input video (only works if output_format is srt)\")\n\n args = parser.parse_args()\n main(args)","repo_name":"biodatlab/thonburian-whisper","sub_path":"longform_transcription/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3695,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"5"} +{"seq_id":"27864419913","text":"from typing import Any, Dict, Generic, List, Optional, Type, TypeVar, Union\nfrom fastapi.encoders import jsonable_encoder\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom sqlalchemy.engine import Result\nfrom sqlalchemy import select, insert\nfrom pydantic import BaseModel\n\nfrom app.src.base.db import Base\n\n# Define custom types for SQLAlchemy model, and Pydantic schemas\nModelType = TypeVar(\"ModelType\", bound=Base)\nCreateSchemaType = TypeVar(\"CreateSchemaType\", bound=BaseModel)\nUpdateSchemaType = TypeVar(\"UpdateSchemaType\", bound=BaseModel)\n\n\nclass CRUDBase(Generic[ModelType, CreateSchemaType, UpdateSchemaType]):\n def __init__(self, model: Type[ModelType]):\n \"\"\"Base class that can be extended by other action classes.\n Provides basic CRUD and listing operations.\n :param model: The SQLAlchemy model\n :type model: Type[ModelType]\n \"\"\"\n self.model = model\n\n async def get_multi(\n self, db: AsyncSession, *, skip: int = 0, limit: int = 100\n ) -> List[ModelType]:\n res: Result = await db.execute(select(self.model).offset(skip).limit(limit))\n res: list[ModelType] = res.scalars().all()\n return res\n\n async def get_by_ids(self, db: AsyncSession, *, ids: list[int]) -> List[ModelType]:\n \"\"\"\n return all Models by ids\n :param db: AsyncSession\n :param ids: list[int]\n :return: list[Model]\n \"\"\"\n return (\n (await db.execute(select(self.model).where(self.model.id.in_(ids))))\n .scalars()\n .all()\n )\n\n async def get(self, db: AsyncSession, id: int) -> Optional[ModelType]:\n \"\"\"\n return Model by id\n :param db: AsyncSession\n :param id: int\n :return: Model\n \"\"\"\n return (\n await db.execute(select(self.model).where(self.model.id == id))\n ).scalar_one_or_none()\n\n async def create(self, db: AsyncSession, *, 
obj_in: CreateSchemaType) -> ModelType:\n \"\"\"\n create an object in db\n :param db: AsyncSession\n :param obj_in: schema.ModelCreate\n :return: Model\n \"\"\"\n obj_in_data = jsonable_encoder(obj_in)\n db_obj = self.model(**obj_in_data) # type: ignore\n db.add(db_obj)\n await db.commit()\n await db.refresh(db_obj)\n return db_obj\n\n async def update(\n self,\n db: AsyncSession,\n *,\n db_obj: ModelType,\n obj_in: Union[UpdateSchemaType, Dict[str, Any]],\n ) -> ModelType:\n \"\"\"\n update Model in db\n :param db: AsyncSession\n :param db_obj: Model\n :param obj_in: schemas.ModelUpdate\n :return: Model\n \"\"\"\n obj_data = jsonable_encoder(db_obj)\n if isinstance(obj_in, dict):\n update_data = obj_in\n else:\n update_data = obj_in.dict(exclude_unset=True)\n for field in obj_data:\n if field in update_data:\n setattr(db_obj, field, update_data[field])\n # db.add(db_obj)\n await db.commit()\n await db.refresh(db_obj)\n return db_obj\n\n async def remove(self, db: AsyncSession, *, id: int) -> Optional[ModelType]:\n \"\"\"\n delete Model from db\n :param db: AsyncSession\n :param id: int\n :return: Model\n \"\"\"\n try:\n obj = (await db.execute(select(self.model).where(self.model.id == id))).scalar_one()\n except Exception as err:\n print(err)\n return None\n await db.delete(obj)\n await db.commit()\n return obj\n\n async def get_all(\n self, db: AsyncSession, only_ids: bool = False\n ) -> Union[List[ModelType], List[int]]:\n \"\"\"\n Get all objects from the database or only object ids\n :param db: The database session\n :type db: AsyncSession\n :param only_ids: If True, only object ids will be returned, defaults to False\n :type only_ids: bool, optional\n :return: List of objects or list of object ids\n \"\"\"\n if only_ids:\n res: list[tuple[int]] = (\n (await db.execute(select(self.model.id))).scalars().all()\n )\n return [x[0] for x in res]\n return (await db.execute(select(self.model))).scalars().all()\n","repo_name":"RomaOkorosso/aerodisk_test_task","sub_path":"app/src/base/crud/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"27163844482","text":"from flask import Flask, render_template, request, redirect\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route(\"/\", methods=[\"POST\"])\ndef predict():\n image_file = request.files['imageFile']\n image_path = './images/' + image_file.filename\n image_file.save(image_path)\n return image_path\n","repo_name":"Brandon-T84/Identifiir","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"22091472790","text":"class Item(object):\n \"\"\"Base Item class\n\n Usage:\n >>> item01 = Item(\"sword\", 150)\n >>> print item01\n <Item sword(150)>\n\n \"\"\"\n\n def __init__(self, name=None, value=None):\n numeric = (int, float, long)\n self.name = \"item\" if not isinstance(name, basestring) else name\n self.value = 0 if not isinstance(value, numeric) else value\n\n def __repr__(self):\n class_name = self.__class__.__name__\n name = self.name\n value = self.value\n return \"<{} {}({})>\".format(class_name, name, value)\n\n\nif __name__ == \"__main__\":\n import doctest\n
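# doctest.testmod() replays the Usage examples in the docstrings above; it prints nothing on success unless the script is run with -v\n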
doctest.testmod()","repo_name":"LearnPythonAndMakeGames/BasicPythonTutorialSeries","sub_path":"basic_tutorials/debugger.py","file_name":"debugger.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"5"} +{"seq_id":"6717547128","text":"def dfs(x, y, s):\n if len(s) == 7:\n nums.add(s)\n return\n else:\n dx, dy = [-1, 0, 1, 0], [0, 1, 0, -1] # 위, 오, 아래, 왼\n for k in range(4):\n nx, ny = x + dx[k], y + dy[k]\n if 0<=nx<4 and 0<=ny<4:\n dfs(nx, ny, s+arr[nx][ny])\n\nT = int(input())\nfor tc in range(1, T+1):\n arr = [list(input().split()) for _ in range(4)]\n nums = set()\n for i in range(4):\n for j in range(4):\n dfs(i, j, arr[i][j])\n print(f'#{tc} {len(nums)}')","repo_name":"minchae9/TIL","sub_path":"algorithm/2021/1008/SWEA_2819.py","file_name":"SWEA_2819.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"20736376827","text":"\"\"\"Tests for full_text_search.py\"\"\"\n\nimport datetime\nimport unittest\n\nfrom google.appengine.ext import db\nfrom google.appengine.ext import testbed\nfrom google.appengine.api import search\nimport sys\nimport logging\nimport delete\nimport full_text_search\nimport model\n\nTEST_DATETIME = datetime.datetime(2010, 1, 1, 0, 0, 0)\n\nclass FullTextSearchTests(unittest.TestCase):\n def setUp(self):\n self.tb = testbed.Testbed()\n self.tb.activate()\n self.tb.init_search_stub()\n self.p1 = model.Person.create_original_with_record_id(\n 'haiti',\n 'haiti/0505',\n given_name='Iori',\n family_name='Minase',\n full_name='Iori Minase',\n alternate_names='Iorin',\n entry_date=TEST_DATETIME\n )\n self.p2 = model.Person.create_original_with_record_id(\n 'haiti',\n 'haiti/1123',\n given_name='Miki',\n family_name='Hoshii',\n full_name='Miki Hoshii',\n entry_date=TEST_DATETIME\n )\n self.p3 = model.Person.create_original_with_record_id(\n 'haiti',\n 'haiti/0522',\n given_name='Ami',\n family_name='Futami',\n full_name='Ami Futami',\n entry_date=TEST_DATETIME\n )\n self.p4 = model.Person.create_original_with_record_id(\n 'haiti',\n 'haiti/0225',\n given_name='Chihaya',\n family_name='Kisaragi',\n full_name='Chihaya Kisaragi',\n home_street='Kunaideme72',\n home_city='Arao',\n home_state='Kumamoto',\n home_postal_code='864-0003',\n home_neighborhood='Araokeibajou',\n home_country='Japan',\n entry_date=TEST_DATETIME\n )\n self.p5 = model.Person.create_original_with_record_id(\n 'haiti',\n 'haiti:0810',\n given_name='Rin',\n family_name='Shibuya',\n full_name='Rin Shibuya',\n home_city='shinjuku',\n entry_date=TEST_DATETIME\n )\n\n def tearDown(self):\n db.delete(model.Person.all())\n self.tb.deactivate()\n\n def test_search_by_name_only(self):\n db.put(self.p1)\n db.put(self.p2)\n db.put(self.p3)\n db.put(self.p4)\n db.put(self.p5)\n full_text_search.add_record_to_index(self.p1)\n full_text_search.add_record_to_index(self.p2)\n full_text_search.add_record_to_index(self.p3)\n full_text_search.add_record_to_index(self.p4)\n full_text_search.add_record_to_index(self.p5)\n\n # Search by alternate name - p1\n results = full_text_search.search('haiti', {'name': 'Iorin'}, 5)\n assert set([r.record_id for r in results]) == \\\n set(['haiti/0505'])\n\n # Search by family name -p1\n results = full_text_search.search('haiti', {'name': 'Minase'}, 5)\n assert set([r.record_id for r in results]) == \\\n set(['haiti/0505'])\n\n # Search by given name - p1\n results = full_text_search.search('haiti', 
{'name': 'Iori'}, 5)\n        assert set([r.record_id for r in results]) == \\\n                set(['haiti/0505'])\n\n        # Search by given name + family name - p1\n        results = full_text_search.search('haiti', {'name': 'Minase Iori'}, 5)\n        assert set([r.record_id for r in results]) == \\\n                set(['haiti/0505'])\n\n        # Search by full name - p1\n        results = full_text_search.search('haiti', {'name': 'Iori Minase'}, 5)\n        assert set([r.record_id for r in results]) == \\\n                set(['haiti/0505'])\n\n        # Search by a name that contains a location - p4\n        results = full_text_search.search('haiti', {'name': 'Chihaya Arao'}, 5)\n        assert not results\n\n        # Search by name & location - p4\n        results = full_text_search.search('haiti', {'name':'Chihaya',\n                                                    'location': 'Arao'}, 5)\n        assert set([r.record_id for r in results]) == \\\n                set(['haiti/0225'])\n\n        # Search by home_street only (input inside the name box) - p4\n        results = full_text_search.search('haiti', {'name': 'Kunaideme72'}, 5)\n        assert not results\n\n        # Search by home_city only (input inside the location box) - p4\n        results = full_text_search.search('haiti', {'location': 'Arao'}, 5)\n        assert not results\n\n        # Search by home_state only (input inside the location box) - p4\n        results = full_text_search.search('haiti', {'location': 'Kumamoto'}, 5)\n        assert not results\n\n        # Search by home_postal_code only (input inside the name box) - p4\n        results = full_text_search.search('haiti', {'name': '864-0003'}, 5)\n        assert not results\n\n        # Search by home_neighborhood only (input inside the location box) - p4\n        results = full_text_search.search(\n            'haiti', {'location': 'Araokeibajou'}, 5)\n        assert not results\n\n        # Search by home_country only (input inside the name box) - p4\n        results = full_text_search.search('haiti', {'name': 'Japan'}, 5)\n        assert not results\n\n        # Check no results\n        results = full_text_search.search('haiti', {'name': 'Producer san'}, 5)\n        assert not results\n\n        # Search with no query text\n        results = full_text_search.search(\n            'haiti', {'name': '', 'location': ''}, 5)\n        assert not results\n\n        # Search deleted record - p3\n        delete.delete_person(self, self.p3)\n        results = full_text_search.search('haiti', {'name': 'Ami'}, 5)\n        assert not results\n\n        # Search with empty dict\n        results = full_text_search.search('haiti', {}, 5)\n\n        # Search by full name - p5\n        results = full_text_search.search('haiti', {'name': 'Rin Shibuya'}, 5)\n        assert set([r.record_id for r in results]) == \\\n                set(['haiti:0810'])\n\n\n        # Search where the location field contains part of the person's name - p5\n        results = full_text_search.search('haiti',\n                                          {'name': 'Rin Shibuya',\n                                           'location': 'Shinjuku Rin'}, 5)\n        assert not results\n\n        # Input the name and location in the wrong box - p5\n        results = full_text_search.search('haiti',\n                                          {'name': 'Shinjuku',\n                                           'location': 'Rin Shibuya'}, 5)\n        assert not results\n\n    def test_delete_record_from_index(self):\n        db.put(self.p2)\n        full_text_search.add_record_to_index(self.p2)\n        full_text_search.delete_record_from_index(self.p2)\n        results = full_text_search.search('haiti', {'name': 'Miki'}, 5)\n        assert not results\n","repo_name":"pockemon/Person-Finder-Testing","sub_path":"Unit-server tests/unit_test_cases/test_full_text_search.py","file_name":"test_full_text_search.py","file_ext":"py","file_size_in_byte":6837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"202272500","text":"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport 
sys\n\nsys.path.append(\".\")\nfrom CNN.modules.linear import Linear\nfrom CNN.modules.softmax import Softmax\nfrom CNN.modules.relu import Relu\nfrom CNN.modules.tanh import Tanh\nfrom CNN.modules.convolution import Convolution\nfrom CNN.modules.avgpool import AvgPool\nfrom CNN.modules.maxpool import MaxPool\nfrom CNN.modules.utils import Utils, Summaries\nimport CNN.input_data\n\nimport tensorflow as tf\nimport numpy as np\nimport pdb\nimport scipy.io as sio\nimport skimage.io as io\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n\nfrom Model import nn\n\nflags = tf.flags\nlogging = tf.logging\n\n\"\"\"\nflags.DEFINE_integer(\"max_steps\", 100000, 'Number of steps to run trainer.')\nflags.DEFINE_integer(\"batch_size\", 100, 'Number of steps to run trainer.')\nflags.DEFINE_integer(\"test_every\", 5, 'Number of steps to run trainer.')\nflags.DEFINE_float(\"learning_rate\", 0.005, 'Initial learning rate')\nflags.DEFINE_string(\"data_dir\", 'data', 'Directory for storing data')\nflags.DEFINE_string(\"summaries_dir\", 'logs', 'Summaries directory')\nflags.DEFINE_boolean(\"save_model\", True, 'Save the trained model')\nflags.DEFINE_boolean(\"reload_model\", False, 'Restore the trained model')\nflags.DEFINE_string(\"checkpoint_dir\", 'cifar_trained_model2', 'Checkpoint dir')\nflags.DEFINE_string(\"checkpoint_reload_dir\", 'cifar_trained_model2', 'Checkpoint dir')\nflags.DEFINE_integer(\"Class\", 10, 'Number of class.')\n\"\"\"\n\nFLAGS = flags.FLAGS\n\nfrom tensorflow.keras.datasets.cifar10 import load_data\n\ndef next_batch(num, data, labels):\n '''\n Return a total of `num` random samples and labels. \n '''\n idx = np.arange(0, len(data))\n np.random.shuffle(idx)\n idx = idx[:num]\n data_shuffle = [data[i] for i in idx]\n labels_shuffle = [labels[i] for i in idx]\n\n return np.asarray(data_shuffle), np.asarray(labels_shuffle)\n\ndef train(tag, worker, image_dir=None):\n FLAGS.max_steps = 100000\n FLAGS.reload_model= False\n\n # Import data\n config = tf.ConfigProto(allow_soft_placement=True)\n config.gpu_options.allow_growth = True\n if tag == 'train':\n FLAGS.batch_size = 100\n elif tag == 'test':\n FLAGS.batch_size = 1\n with tf.Session(config=config) as sess:\n\n # with tf.Session() as sess:\n # Input placeholders\n with tf.name_scope('input'):\n x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])\n y_ = tf.placeholder(tf.float32, shape=[None, 10])\n phase = tf.placeholder(tf.bool, name='phase')\n\n with tf.variable_scope('model'):\n net = nn(phase)\n inp = tf.reshape(x, [FLAGS.batch_size, 32, 32, 3])\n op = net.forward(inp)\n y = tf.reshape(op, [FLAGS.batch_size, 10])\n\n trainer = net.fit(output=y, ground_truth=y_, loss='softmax_crossentropy', optimizer='adam',\n opt_params=[FLAGS.learning_rate])\n with tf.name_scope('accuracy'):\n accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)), tf.float32))\n tf.summary.scalar('accuracy', accuracy)\n\n # Merge all the summaries and write them out to /tmp/mnist_logs (by default)\n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train', sess.graph)\n test_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/test')\n\n tf.global_variables_initializer().run()\n\n utils = Utils(sess, FLAGS.checkpoint_reload_dir)\n if FLAGS.reload_model:\n utils.reload_model(dataset='CIFAR10')\n (x_train, y_train), (x_test, y_test) = load_data()\n y_train_one_hot = tf.squeeze(tf.one_hot(y_train, 10), axis=1)\n y_test_one_hot = tf.squeeze(tf.one_hot(y_test, 10), axis=1)\n 
if tag=='train':\n for i in range(FLAGS.max_steps):\n d = next_batch(FLAGS.batch_size, x_train, y_train_one_hot.eval())\n inp = {x: d[0], y_: d[1], phase: True}\n summary, _, acc, op2 = sess.run(\n [merged, trainer.train, accuracy, y], feed_dict=inp)\n # train_writer.add_summary(summary, i)\n acc_msg = 'Error-rate at step %s: %f' % (i, 1-acc)\n worker.train_msg.emit(acc_msg)# send signal\n print(acc_msg)\n Utils.save_model(self=utils, dataset='CIFAR10')\n\n if tag == 'test': # test-set accuracy\n FLAGS.batch_size = 1\n image_dir = image_dir\n print('INPUT:',image_dir)\n\n utils = Utils(sess, FLAGS.checkpoint_reload_dir)\n utils.reload_model(dataset='CIFAR10')\n\n d = io.imread(image_dir).astype('float32')\n d = d.reshape([1, 32, 32, 3])\n test_inp = {x: d, phase: False}\n # pdb.set_trace()\n\n import timeit\n start = timeit.default_timer()\n\n logit = sess.run([y], feed_dict=test_inp)\n logit = logit[0].squeeze()\n print(logit)\n pred = np.argmax(logit)\n\n cls = ['airplane','automobile','bird','cat','deer','dog','frog','horse','ship','truck']\n stop = timeit.default_timer()\n print('Runtime: %f' % (stop - start))\n # test_writer.add_summary(summary, i)\n prob_msg = '''\n\nairplane : %.2f\nautomobile : %.2f\nbird : %.2f\ncat : %.2f\ndeer : %.2f\ndog : %.2f\nfrog : %.2f\nhorse : %.2f\nship : %.2f\ntruck : %.2f\\n\n ''' % (\n logit[0], logit[1], logit[2], logit[3], logit[4], logit[5], logit[6], logit[7], logit[8], logit[9])\n pred_msg='Predict: %s'%(cls[pred])\n worker.test_msg.emit([prob_msg, pred_msg])\n print(pred_msg)\n # pdb.set_trace()\n\n train_writer.close()\n test_writer.close()\n\n\n\ndef run(tag='train', worker=None, image_dir=None):\n if tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.DeleteRecursively(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n train(tag, worker, image_dir)\n\n\"\"\"\nif __name__ == '__main__':\n tf.app.run()\n\"\"\"","repo_name":"jaehyunnn/DeepLearning_GUI","sub_path":"CNN/cifar_convolutional.py","file_name":"cifar_convolutional.py","file_ext":"py","file_size_in_byte":6269,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"9797698870","text":"# imort de l'aglgorithm heapq permettant d'utiliser l'element list pour ajouter ou retirer des elements à une liste en la laissant toujours en ordre\r\nimport heapq\r\nfrom heapq import heappop, heappush\r\n\r\n\r\ndef isLeaf(root):\r\n return root.left is None and root.right is None\r\n\r\n\r\n# un noeud de l'arbre\r\nclass Node:\r\n def __init__(self, ch, freq, left=None, right=None):\r\n self.ch = ch\r\n self.freq = freq\r\n self.left = left\r\n self.right = right\r\n\r\n # On Override la finction __lt__() pour que le noeud marche avec priorité pour que la lettre avec la plus haute priorité ait la fréquence la moins élevée\r\n\r\n def __lt__(self, other):\r\n return self.freq < other.freq\r\n\r\n#on traverse l'arbre de huffman et on stocke les codes de huffman pour chaque lettrre dans un dictionaire\r\n\r\ndef encode(root, str, huffman_code):\r\n if root is None:\r\n return\r\n\r\n # trouver la feuille d'un noeud\r\n if isLeaf(root):\r\n huffman_code[root.ch] = str if len(str) > 0 else '1'\r\n\r\n encode(root.left, str + '0', huffman_code)\r\n encode(root.right, str + '1', huffman_code)\r\n\r\n\r\n#on parcours l'arbre et on décode le message encodé\r\ndef decode(root, index, str):\r\n if root is None:\r\n return index\r\n\r\n # trouver la feuille d'un noeud\r\n if isLeaf(root):\r\n print(root.ch, end='')\r\n return index\r\n\r\n index = 
index + 1\r\n root = root.left if str[index] == '0' else root.right\r\n return decode(root, index, str)\r\n\r\n#On construit l'arbre et on décode le texte rentré en input\r\ndef buildHuffmanTree(text):\r\n # cas basique: la chaine est vide\r\n if len(text) == 0:\r\n return\r\n\r\n # on compte la fréquence d'apparution de chaque caractère et on le stocke dans un dictionnaire\r\n freq = {i: text.count(i) for i in set(text)}\r\n\r\n # # on crée un chaine de priorité pour stocké nos différents noeuds de l'arbre\r\n pq = [Node(k, v) for k, v in freq.items()]\r\n heapq.heapify(pq)\r\n\r\n # on le fait tant qu'il y a plus d'un noeud dans la chaine\r\n while len(pq) != 1:\r\n # on enleve les 2 noeuds qui ont la plus haute priorité (la féquence la moins importante dans la chaine\r\n\r\n\r\n left = heappop(pq)\r\n right = heappop(pq)\r\n\r\n # On crée un noveau noeud avec ces 2 noeuds enfants avec la fréquence qui est égale à la somme des 2 noeuds\r\n\r\n # on ajoute le nouveau noeud à la chaine de priorité\r\n\r\n total = left.freq + right.freq\r\n heappush(pq, Node(None, total, left, right))\r\n\r\n # root enregistre la racine de l'arbre de Huffman\r\n root = pq[0]\r\n\r\n # on parcours l'arbre de huffman et on la stock dans un dictionnaire\r\n\r\n huffmanCode = {}\r\n encode(root, \"\", huffmanCode)\r\n\r\n #la récureance de chaque lettre dans le message\r\n nbLetter=len(text.replace(\" \", \"\"))\r\n\r\n print('le message original pèse ',nbLetter*8,\" octets\")\r\n\r\n #on affiche les codes d'huffman\r\n print(\"les codes d'Huffman sont:\", huffmanCode)\r\n print(\"Le message original est:\", text)\r\n\r\n # on affiche le message encodé\r\n str = \"\"\r\n for c in text:\r\n str += huffmanCode.get(c)\r\n\r\n print(\"le message encodé est:\", str)\r\n\r\n #on affiche en bytes le tockage de la taille compréssée\r\n byte=0\r\n for i in str:\r\n byte+=1\r\n\r\n print(\"la taille comprésse est de \",byte,\" octets\")\r\n\r\n if isLeaf(root):\r\n # cas spéciaux: pour les input comme b, bbbb, bbbbbbb, etc.\r\n while root.freq > 0:\r\n print(root.ch, end='')\r\n root.freq = root.freq - 1\r\n else:\r\n # on traverse encore l'abre de huffman et cette fois in décode la chaine,\r\n index = -1\r\n while index < len(str) - 1:\r\n index = decode(root, index, str)\r\n\r\n\r\nif __name__ == '__main__':\r\n text = input(\"rentrer votre chaine à encoder ici\")\r\n buildHuffmanTree(text)\r\n","repo_name":"Nedgs/Huffman-coding","sub_path":"codagehuffman.py","file_name":"codagehuffman.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"70409620879","text":"def solution(text, anagram, sw):\n answer = ''\n temp = [[]] * len(text) # not [] but [[]]\n if sw == True:\n for i in range(len(text)):\n temp[anagram[i]] = text[i]\n for i in temp:\n answer += i\n else:\n for i in range(len(text)):\n answer += text[anagram[i]]\n return answer","repo_name":"kimpro82/MyCodingContest","sub_path":"Programmers/PCCE/01/10.py","file_name":"10.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"31559992636","text":"# pacmanAgents.py\n# ---------------\n# Licensing Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to 
http://ai.berkeley.edu.\n#\n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel (pabbeel@cs.berkeley.edu).\n\n\nfrom pacman import Directions\nfrom game import Agent\nfrom heuristics import *\nimport random\nimport math\n\nclass RandomAgent(Agent):\n # Initialization Function: Called one time when the game starts\n def registerInitialState(self, state):\n return;\n\n # GetAction Function: Called with every frame\n def getAction(self, state):\n # get all legal actions for pacman\n actions = state.getLegalPacmanActions()\n # returns random action from all the valide actions\n return actions[random.randint(0,len(actions)-1)]\n\nclass RandomSequenceAgent(Agent):\n # Initialization Function: Called one time when the game starts\n def registerInitialState(self, state):\n self.actionList = [];\n for i in range(0,10):\n self.actionList.append(Directions.STOP);\n return;\n\n # GetAction Function: Called with every frame\n def getAction(self, state):\n # get all legal actions for pacman\n possible = state.getAllPossibleActions();\n for i in range(0,len(self.actionList)):\n self.actionList[i] = possible[random.randint(0,len(possible)-1)];\n tempState = state;\n for i in range(0,len(self.actionList)):\n if tempState.isWin() + tempState.isLose() == 0:\n tempState = tempState.generatePacmanSuccessor(self.actionList[i]);\n else:\n break;\n # returns random action from all the valide actions\n return self.actionList[0];\n\nclass HillClimberAgent(Agent):\n # Initialization Function: Called one time when the game starts\n def registerInitialState(self, state):\n # nothing Initialization\n return;\n\n # GetAction Function: Called with every frame\n def getAction(self, state):\n # TODO: write Hill Climber Algorithm instead of returning Directions.STOP\n possible = state.getAllPossibleActions()\n flag = False\n # create action sequence\n actSeq = list()\n finScore = [gameEvaluation(state, state), [Directions.STOP]]\n for i in range(5):\n actSeq.append(random.choice(possible))\n\n while True:\n actionNext = list()\n for i in range(5):\n if (random.randint(0, 1) == 0):\n actionNext.append(actSeq[i])\n else:\n actionNext.append(random.choice(possible))\n stateCur = state\n # perform all the sequence\n for action in actionNext:\n stateNext = stateCur.generatePacmanSuccessor(action)\n if stateNext is not None:\n if not stateNext.isWin() and not stateNext.isLose():\n stateCur = stateNext\n # if win, just return.. (if comment this, it also works)\n if stateNext.isWin():\n return actionNext[0]\n if stateNext.isLose():\n break\n continue\n else:\n flag = True\n break\n if flag == True:\n return finScore[1][0]\n score = gameEvaluation(state, stateCur)\n if (score > finScore[0]):\n finScore[0] = score\n finScore[1] = actionNext\n # finScore[1].append(actionNext[0])\n actSeq = actionNext\n else:\n continue\n\n # return Directions.STOP\n\nclass GeneticAgent(Agent):\n # Initialization Function: Called one time when the game starts\n def registerInitialState(self, state):\n return;\n\n # GetAction Function: Called with every frame\n def getAction(self, state):\n # TODO: write Genetic Algorithm instead of returning Directions.STOP\n # DataStructure:\n # 0 1 2\n # [actionsequence, score, rank]\n\n # The Threshold of generationTimes. 
(Decide when will we end)\n generationTimes = 10\n # Initialization population\n population = list()\n possible = state.getAllPossibleActions()\n\n for i in range(8):\n actionSeq = list()\n for j in range(5):\n actionSeq.append(random.choice(possible))\n population.append([actionSeq, 0, 0])\n\n # rankSelect\n # select from the list [0, 2 numbers 1, 3 numbers 2,.... 8 numbers 7]\n # so pick index 7 from the list's probability is 8/(8 + 7 + .. + 1)\n choicesRank = list()\n for i in range(8):\n for j in range(i + 1):\n choicesRank.append(i)\n # print(choicesRank)\n\n while generationTimes > 0:\n nextPopulation = list()\n # calculate fitness score for the population\n for i in range(len(population)):\n actionSeq = population[i][0]\n finState = state\n tempState = state\n for action in actionSeq:\n tempState = finState.generatePacmanSuccessor(action)\n if tempState is None:\n break\n if tempState.isLose():\n break\n finState = tempState\n if tempState is None:\n continue\n score = gameEvaluation(state, finState)\n population[i][1] = score\n\n # assign ranking\n rankSum = 0\n population.sort(key = lambda x : x[1])\n for i in range(len(population)):\n population[i][2] = i + 1\n rankSum = rankSum + population[i][2]\n\n # nextPopulation\n while (len(nextPopulation) < 8):\n # pick each pair according ranking :\n index = random.choice(choicesRank) #line133 explain\n index2 = random.choice(choicesRank)\n while index2 == index:\n index2 = random.choice(choicesRank)\n\n # Apply a random test, If the test result is less (or equal) to 70%\n # the pair will generate two children by crossing-over.\n randomTest = random.random()\n if randomTest <= 0.7:\n parent1 = population[index]\n parent2 = population[index2]\n newGene = list()\n for i in range(5):\n randomTest2 = random.random()\n if randomTest2 < 0.5:\n newGene.append(parent1[0][i])\n else:\n newGene.append(parent2[0][i])\n nextPopulation.append([newGene, 0, 0])\n else:\n if (len(nextPopulation) == 7):\n continue\n nextPopulation.append(population[index])\n nextPopulation.append(population[index2])\n\n # mutate:\n for i in range(8):\n randomTest = random.random()\n if randomTest <= 0.1:\n indexRan = random.randint(0, 4)\n nextPopulation[i][0][indexRan] = random.choice(nextPopulation[i][0])\n\n # generate K times\n generationTimes = generationTimes - 1\n population = nextPopulation\n\n #\n population.sort(key = lambda x : x[1])\n finAction = population[len(population) - 1][0][0]\n\n return finAction\n\nclass MCTSAgent(Agent):\n # Initialization Function: Called one time when the game starts\n def registerInitialState(self, state):\n # 0 1 2 3 4\n # NODE (action, child, score, parent, visited)\n return;\n\n # GetAction Function: Called with every frame\n def getAction(self, state):\n # Data Structure (instead)\n # index 0 1 2 3 4\n # NODE (action, child, score, parent, visited)\n # TODO: write MCTS Algorithm instead of returning Directions.STOP\n\n # count the UCT score to get best score child\n def UCT(node, visitTimes):\n # socre / visited\n return node[2] / float(node[4]) + (math.sqrt(2 * math.log(float(visitTimes)) / float(node[4])))\n\n def fullExpand(node, state):\n actions = state.getLegalPacmanActions()\n if (len(actions) == len(node[1])):\n return True\n return False\n\n def treePolicy(node, state):\n # Check if the current node is the leaf node\n while True:\n if fullExpand(node, state):\n # get best score child\n node = max(node[1], key = lambda x: UCT(x, node[4]))\n else:\n return expand(node, state)\n return node\n\n def expand(node, 
state):\n childAction = [child[0] for child in node[1]]\n actions = state.getLegalPacmanActions()\n for action in actions:\n stateNext = state.generatePacmanSuccessor(action)\n if stateNext is None:\n return None\n if stateNext.isLose():\n return None\n if action not in childAction:\n newNode = [action, [], 0.0, node, 1]\n node[1].append(newNode)\n return newNode\n return None\n\n def defaultPolicy(node, state):\n # roll out 5 times\n curState = state\n actions = state.getLegalPacmanActions()\n if len(actions) == 0:\n return None\n for i in range(5):\n randomAction = random.choice(actions)\n curState = curState.generatePacmanSuccessor(randomAction)\n if curState == None:\n return None\n if curState.isLose():\n return None\n reward = gameEvaluation(rootState, curState)\n return reward\n\n def bp(node, reward):\n while node != None:\n node[4] = node[4] + 1\n node[2] = node[2] + reward\n node = node[3]\n return\n\n # Main Funtion:\n rootState = state\n root = [None, [], 0.0, None, 1]\n while True:\n expandNode = treePolicy(root, state)\n if expandNode == None:\n break\n reward = defaultPolicy(expandNode, state)\n if reward == None:\n break\n bp(expandNode, reward)\n\n if len(root[1]) == 0:\n return Directions.STOP\n # get most visited node\n bestVisit = max(root[1], key=lambda x: x[4])[4]\n bestAction = [child[0] for child in root[1] if child[4] == bestVisit]\n\n return random.choice(bestAction)\n","repo_name":"dtccx/aiproject","sub_path":"pacmanAgents.py","file_name":"pacmanAgents.py","file_ext":"py","file_size_in_byte":11475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"10750706776","text":"import sys\nimport os\nimport random\nimport vedo \nimport numpy as np \n\ndir = os.path.join(os.path.abspath(os.path.dirname(__file__)), os.path.pardir, 'src')\nsys.path.append(os.path.abspath(dir))\nprint(dir)\n\nfrom BaseDigitalTwin import BaseDigitalTwin\nfrom beam import Beam\n\nif __name__ == '__main__':\n \"\"\"\n Given the centerlines and a sample_rate, this generates and plot the Bezier curve fitting and sampling \n \"\"\"\n\n # Input mesh \n mesh = \"./data/mesh/processed_porteveine.stl\"\n\n # Input text file where centerlines will be stored\n skeleton_file = \"./data/skeleton/output_skeleton.txt\"\n\n # Structuring centerlines into a list of polylines \n baseDT = BaseDigitalTwin()\n baseDT.get_centerlines(mesh, skeleton_file)\n vessel = baseDT.getSkeletonData(skeleton_file)\n\n # Bezier curves\n BezierCurves = []\n\n # Sampled points \n SampledPoints = []\n\n # List of colors\n chars = '0123456789ABCDEF'\n colors = ['#'+''.join(random.sample(chars,6)) for i in range(100)]\n \n for branch in vessel: \n\n # Using the Beam class, we approximate a bezier curve on the centerlines and sampe points each 0.005 \n beam = Beam(branch, 0.005)\n sample = beam.sample\n\n # Vedo Points data Structure\n sample = vedo.Points(sample, r=5)\n color = np.random.choice(colors)\n sample.c(color)\n SampledPoints.append(sample)\n\n # Vedo Bezier Curves visualization \n BezierCurves.append(vedo.Bezier(beam.sample).c(color))\n\n # Plotting\n vedo.show(SampledPoints, BezierCurves)","repo_name":"Sidaty1/DigitalTwin","sub_path":"unittests/unittest_bezier_fitting.py","file_name":"unittest_bezier_fitting.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"} +{"seq_id":"37569083942","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 27.11.2020 20:34\n \n@author: Piotr 
Gradkowski \n\"\"\"\n\n__authors__ = ['Piotr Gradkowski ']\n__date__ = '2020-11-27'\n__all__ = ['HtmlWriter']\n\nimport logging\nimport os\nimport re\n\nfrom src.config import EpubSongbookConfig\nfrom src.tixi import Tixi\n\n\nclass HtmlWriter(object):\n \"\"\"A generic class used to write the xhtml files.\"\"\"\n\n def __init__(self, tixi: Tixi, settings: EpubSongbookConfig):\n self.src_tixi = tixi\n self.settings = settings\n self.tixi = Tixi()\n self.tixi.create(\"html\")\n self.tixi.addTextAttribute(\"/html\", \"xmlns\", \"http://www.w3.org/1999/xhtml\")\n self.tixi.createElement(\"/html\", \"head\")\n\n headPath = \"/html/head\"\n\n self.tixi.addTextElement(headPath, \"title\", self.settings.title)\n self.tixi.createElement(headPath, \"link\")\n\n linkPath = headPath + \"/link\"\n\n attrs = {\"rel\": \"stylesheet\",\n \"type\": \"text/css\",\n \"href\": \"../songbook.css\"}\n for a in attrs:\n self.tixi.addTextAttribute(linkPath, a, attrs[a])\n\n self.root = \"/html\"\n\n def saveFile(self, fileName):\n \"\"\"Apply specific formatting and save the content of the self.self.tixi to a file filename\"\"\"\n text = self.tixi.exportDocumentAsString()\n\n # First of all, add encoding if present\n if self.settings.encoding is not None:\n text = text[:19] + \" encoding='{}'\".format(self.settings.encoding) + text[19:]\n replaceRules = {\n \"<br/>\": \"
\",\n \"&\": \"&\"\n }\n for rr in replaceRules:\n text = text.replace(rr, replaceRules[rr])\n # Now regular expressions\n\n # fold the table rows into a single line\n text = re.sub(r\"(<\\/?t[dr].*?>)\\s+(<\\/?t[dr])\", r\"\\1\\2\", text)\n # repeat to catch also the overlapping tokens: possible if td is empty ()\n text = re.sub(r\"(<\\/?t[dr].*?>)\\s+(<\\/?t[dr])\", r\"\\1\\2\", text)\n\n logging.debug(\"Writing HTML file: {}\".format(os.path.abspath(fileName)))\n file = open(os.path.join(fileName), \"w\", encoding='utf8')\n file.write(text)\n file.close()\n logging.debug(\" -- OK\")\n","repo_name":"grotsztaksel/Songbook_XMLtoEPUB","sub_path":"src/tools/html_writer.py","file_name":"html_writer.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"30379956883","text":"from PIL import Image, ImageFilter\nimport cv2\nimport os\nimport yaml\nimport numpy as np\n\nDATASET = \"../dataset/real documents/\"\n\n\ndef detect_page(img, show=False):\n inner_top = (0,0)\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n threshold = 55\n size = 89\n thresholded = cv2.adaptiveThreshold(\n gray,255,\n cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,\n size, threshold\n )\n rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (40, 75))\n closing = cv2.morphologyEx(thresholded, cv2.MORPH_CLOSE, rect_kernel, iterations=4)\n contours, _ = cv2.findContours(closing, cv2.RETR_EXTERNAL, \n cv2.CHAIN_APPROX_SIMPLE)\n\n img2 = img.copy()\n p = 0\n for cnt in contours:\n x, y, w, h = cv2.boundingRect(cnt)\n # convex_contour = cv2.convexHull(cnt)\n area = cv2.contourArea(cnt)\n # print(area)\n if area > 1000000:\n d1 = x\n d2 = img2.shape[1]-(x+w)\n if d1 > d2:\n p = 1\n inner_top = (x+w, y)\n else:\n inner_top = (x,y)\n cv2.rectangle(img2, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n if show:\n page = Image.fromarray(img2)\n page.show()\n return p, inner_top\n\ndef detect_cols(name, img, show=False):\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n threshold = 105\n size = 101\n if \"051\" in name:\n threshold = 85\n if \"184\" in name:\n threshold = 115\n\n thresholded = cv2.adaptiveThreshold(\n gray, 255,\n cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV,\n size, threshold\n )\n \n rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (60, 100))\n closing = cv2.morphologyEx(thresholded, cv2.MORPH_CLOSE, rect_kernel)\n contours, _ = cv2.findContours(closing, cv2.RETR_EXTERNAL, \n cv2.CHAIN_APPROX_SIMPLE)\n\n img2 = img.copy()\n col = 0\n cols = []\n for cnt in contours:\n x, y, w, h = cv2.boundingRect(cnt)\n area = cv2.contourArea(cnt)\n if area > 1000000 and col < 2:\n cv2.rectangle(img2, (x, y), (x + w, y + h), (0, 255, 0), 2)\n cols.append((x,y,w,h))\n col += 1\n\n if show:\n bounds = Image.fromarray(img2)\n bounds.show()\n clo = Image.fromarray(closing)\n clo.show()\n cols.sort(key= lambda x: x[0])\n return cols\n \n\ndef detect_lines(col_img, show=False):\n gray = cv2.cvtColor(col_img, cv2.COLOR_RGB2GRAY)\n threshold = 95\n size = 35\n\n thresholded = cv2.adaptiveThreshold(\n gray, 255,\n cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV,\n size, threshold\n )\n \n rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (100, 2))\n closing = cv2.morphologyEx(thresholded, cv2.MORPH_CLOSE, rect_kernel, iterations=4)\n contours, _ = cv2.findContours(closing, cv2.RETR_EXTERNAL, \n cv2.CHAIN_APPROX_SIMPLE)\n\n img2 = col_img.copy()\n baselines = []\n for cnt in contours:\n x, y, w, h = 
cv2.boundingRect(cnt)\n area = cv2.contourArea(cnt)\n if area > 10000:\n cv2.rectangle(img2, (x, y), (x + w, y + h), (0, 255, 0), 2)\n baselines.append(y+h)\n\n if show:\n bounds = Image.fromarray(img2)\n bounds.show()\n clo = Image.fromarray(closing)\n clo.show()\n\n return baselines\n\nif __name__ == \"__main__\": \n page_info = []\n dist = {0: [], 1: []}\n cols1 = {0: {\"x\": [], \"y\": [], \"w\": [], \"h\": []},\n 1: {\"x\": [], \"y\": [], \"w\": [], \"h\": []}\n }\n cols2 = {0: {\"x\": [], \"y\": [], \"w\": [], \"h\": []},\n 1: {\"x\": [], \"y\": [], \"w\": [], \"h\": []}\n }\n line1 = {0: [], 1: []}\n line2 = {0: [], 1: []}\n nb_lines = []\n for file in os.listdir(DATASET):\n # for file in [\"e-codices_csg-0231_084_max.jpg\"]:\n lines = list()\n cols = list()\n img = cv2.imread(DATASET+file)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n p, inner_top = detect_page(img)\n \n page_cols = detect_cols(file, img)\n c = 0\n for col in page_cols:\n col_lines = detect_lines(img[col[1]:(col[1]+col[3]), col[0]:(col[0]+col[2])], True)\n col_lines.sort()\n lines.append(col_lines)\n if c == 0:\n cols1[p][\"x\"].append(col[0])\n cols1[p][\"y\"].append(col[1])\n cols1[p][\"w\"].append(col[2])\n cols1[p][\"h\"].append(col[3])\n line1[p].append(col_lines[0])\n c = 1\n else:\n cols2[p][\"x\"].append(col[0])\n cols2[p][\"y\"].append(col[1])\n cols2[p][\"w\"].append(col[2])\n cols2[p][\"h\"].append(col[3])\n line2[p].append(col_lines[0])\n c = 0\n nb_lines.append(len(col_lines))\n # line1[p].append(lines[0][0])\n # line2[p].append(lines[1][0])\n\n for col_lines in lines:\n for i in range(len(col_lines)-1):\n d = col_lines[i+1]-col_lines[i]\n dist[p].append(d)\n with open('config.yaml', 'w') as writer:\n data_doc = {\n 'orient': {\n 0:\n {\n 'col1': (float(np.array(cols1[0][\"x\"]).mean()),\n float(np.array(cols1[0][\"y\"]).mean()),\n float(np.array(cols1[0][\"w\"]).mean()),\n float(np.array(cols1[0][\"h\"]).mean())\n ),\n 'col1_std': (float(np.array(cols1[0][\"x\"]).std()),\n float(np.array(cols1[0][\"y\"]).std()),\n float(np.array(cols1[0][\"w\"]).std()),\n float(np.array(cols1[0][\"h\"]).std())\n ),\n 'col2': (float(np.array(cols2[0][\"x\"]).mean()), \n float(np.array(cols2[0][\"y\"]).mean()),\n float(np.array(cols2[0][\"w\"]).mean()), \n float(np.array(cols2[0][\"h\"]).mean())\n ),\n 'col2_std': (float(np.array(cols2[0][\"x\"]).std()), \n float(np.array(cols2[0][\"y\"]).std()), \n float(np.array(cols2[0][\"w\"]).std()), \n float(np.array(cols2[0][\"h\"]).std())\n ),\n 'line1': float(np.array(line1[0]).mean()),\n 'line1_std': float(np.array(line1[0]).std()),\n 'line2': float(np.array(line2[0]).mean()),\n 'line2_std': float(np.array(line2[0]).std()),\n \"avg_line_d\": float(np.array(dist[0]).mean()),\n \"line_d_std\": float(np.array(dist[0]).std())\n },\n 1:\n {\n 'col1': (float(np.array(cols1[1][\"x\"]).mean()), \n float(np.array(cols1[1][\"y\"]).mean()), \n float(np.array(cols1[1][\"w\"]).mean()), \n float(np.array(cols1[1][\"h\"]).mean())\n ),\n 'col1_std': (float(np.array(cols1[1][\"x\"]).std()), \n float(np.array(cols1[1][\"y\"]).std()), \n float(np.array(cols1[1][\"w\"]).std()), \n float(np.array(cols1[1][\"h\"]).std())\n ),\n 'col2': (float(np.array(cols2[1][\"x\"]).mean()), \n float(np.array(cols2[1][\"y\"]).mean()), \n float(np.array(cols2[1][\"w\"]).mean()), \n float(np.array(cols2[1][\"h\"]).mean())\n ),\n 'col2_std': (float(np.array(cols2[1][\"x\"]).std()), \n float(np.array(cols2[1][\"y\"]).std()), \n float(np.array(cols2[1][\"w\"]).std()), \n float(np.array(cols2[1][\"h\"]).std())\n ),\n 
'line1': float(np.array(line1[1]).mean()),\n 'line1_std': float(np.array(line1[1]).std()),\n 'line2': float(np.array(line2[1]).mean()),\n 'line2_std': float(np.array(line2[1]).std()),\n \"avg_line_d\": float(np.array(dist[1]).mean()),\n \"line_d_std\": float(np.array(dist[1]).std()) \n }\n }\n }\n yaml.dump(data_doc, writer)\n print(dist)\n","repo_name":"LemonSearch/Synthetic-Handwriting-Document-Images-Generator","sub_path":"Task4/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":8780,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"6288620693","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('userspace', '0002_auto_20150218_0944'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='userprofile',\n old_name='background_image',\n new_name='image',\n ),\n ]\n","repo_name":"CivilHub/CivilHub","sub_path":"userspace/migrations/0003_auto_20150223_1140.py","file_name":"0003_auto_20150223_1140.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":111,"dataset":"github-code","pt":"29"} +{"seq_id":"3106778564","text":"from Lista import Lista\nimport os\n\n\nclass Cliente(Lista):\n def __init__(self, rfc=\"\", nombre=\"\", telefono=\"\"):\n super().__init__(\"Clientes.json\")\n self.rfc = rfc\n self.nombre = nombre\n self.telefono = telefono\n\n def __str__(self):\n return f\"{self.rfc},{self.nombre},{self.telefono}\"\n\n def to_dict(self):\n listaDicc = []\n if type(self) == list:\n for item in self:\n if type(item) == dict:\n listaDicc.append(item)\n else:\n listaDicc.append(item.to_dict())\n return listaDicc\n elif type(self) == dict:\n listaDicc.append(self.listas)\n else:\n diccionario = {\"rfc\": self.rfc, \"nombre\": self.nombre, \"telefono\": self.telefono}\n\n listaDicc.append(diccionario)\n return diccionario\n\n def from_json(self):\n clientes_json = self.json.leer_de_json()\n clientes_obj = []\n for cliente in clientes_json:\n cli = Cliente(cliente[\"rfc\"], cliente[\"nombre\"], cliente[\"telefono\"])\n clientes_obj.append(cli)\n return clientes_obj\n\n\nif __name__ == \"__main__\":\n clientes = Cliente().from_json()\n\n print(clientes[0])\n\n\n","repo_name":"SebasRamz-Murillo/SimplePythonCRUD","sub_path":"Cliente.py","file_name":"Cliente.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"41674766541","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def removeNthFromEnd(self, head: Optional[ListNode], n: int) -> Optional[ListNode]:\n length , count = 0 , 2\n node = head\n \n while node:\n length += 1\n node = node.next\n\n prev = head\n curr = prev.next \n delete = length - n + 1\n \n while curr:\n \n if delete == 1:\n head = head.next\n return head\n \n if count == delete:\n prev.next = curr.next\n return head\n \n count += 1\n curr = curr.next\n prev = prev.next\n \n","repo_name":"YasubGetu/Competitive-programming","sub_path":"removeNthFromEnd.py","file_name":"removeNthFromEnd.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"24842820370","text":"import cv2\r\nimport numpy as 
np\r\n\r\n# K-means\r\ndef k_means(img, Class=5):\r\n # get shape\r\n H, W, C = img.shape\r\n np.random.seed(0) \r\n img = np.reshape(img, (H * W, -1))\r\n i = np.random.choice(np.arange(H * W), Class, replace=False)\r\n Cs = img[i].copy()\r\n print(Cs)\r\n # 上記まででランダムに5つのインデックスを取得している\r\n \r\n \r\n clss = np.zeros((H * W), dtype=int)\r\n\r\n # each pixel\r\n for i in range(H * W):\r\n # get distance from base pixel\r\n dis = np.sqrt(np.sum((Cs - img[i]) ** 2, axis=1))\r\n # get argmin distanc\r\n clss[i] = np.argmin(dis)\r\n\r\n # show\r\n out = np.reshape(clss, (H, W)) * 50\r\n out = out.astype(np.uint8)\r\n\r\n return out\r\n\r\n\r\n# read image\r\nimg = cv2.imread(\"Question_91_100\\imori.jpg\").astype(np.float32)\r\n\r\nout = k_means(img)\r\n\r\ncv2.imshow(\"result\", out)\r\ncv2.waitKey(0)\r\n","repo_name":"Yuma-Tsukakoshi/image_Processing_100","sub_path":"Question_91_100/practices_py/prac_92.py","file_name":"prac_92.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"26344461836","text":"import numpy as np\n\n\ndef accuracy_score(actual, predicted):\n\t\"\"\"\n\tComputes the accuracy\n\t\"\"\"\n\tm = actual.size\n\tcorrect = 0\n\n\tfor i in range(0, m):\n\t\tif actual[i] == predicted[i]:\n\t\t\tcorrect += 1\n\n\treturn correct / m\n","repo_name":"septa97/SP-server","sub_path":"app/utils/data_operation.py","file_name":"data_operation.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"5922826586","text":"import flask\nfrom App import application, db, login#, csrf\nfrom App.errors import APIError\nfrom App.models import User, Post\nfrom flask import render_template, request, jsonify, send_from_directory\nimport json\nfrom flask_login import current_user, login_required, login_user, logout_user\nfrom flask_wtf.csrf import generate_csrf, session, CSRFError\n\n#@login.user_loader\n#def load_user(id):\n# return User.query.get(int(id))\n\n@login.user_loader\ndef user_loader(id):\n return User.query.get(int(id))\n\n@application.route('/')\n@application.route('/index')\ndef index():\n return application.send_static_file(\"index.html\")\n\n@application.route('/hello')\ndef hello():\n return \"Hello, world!\"\n\n# Extract bodies from posts and send as a stringified list\n@application.route('/posts')\n@login_required\ndef sendPosts():\n posts = Post.query.filter_by(user_id=current_user.id)\n postBodies = list(map(lambda post: { \"text\" : post.body, \"ID\": post.id, \"coordinates\": post.location}, posts))\n postsString = json.dumps(postBodies)\n return postsString\n\n@application.route('/addpost', methods=['POST'])\n@login_required\ndef addPost():\n if request.method == 'POST':\n newPost = Post(body=request.json['text'], user_id=current_user.id,\n location=request.json['coordinates'])\n db.session.add(newPost)\n db.session.commit()\n justMade = {\n \"text\": newPost.body,\n \"ID\": newPost.id,\n \"coordinates\": newPost.location\n }\n return json.dumps(justMade)\n\n@application.route('/deletepost', methods=['POST'])\n@login_required\ndef deletePost():\n if request.method == 'POST':\n id = request.json['postID']\n post = Post.query.get(id)\n db.session.delete(post)\n db.session.commit()\n return sendPosts()\n\n@application.route('/login', methods=['GET','POST'])\ndef login():\n if current_user.is_authenticated:\n return \"loggedin\"\n user = 
User.query.filter_by(username=request.json[\"username\"]).first()\n if user is None or not user.check_password(request.json[\"password\"]):\n print(\"apierror\")\n raise APIError(\"Invalid username or password\")\n login_user(user)\n return \"success\"\n\n@application.route('/logout')\n@login_required\ndef logout():\n logout_user()\n return \"success\"\n\n@application.route('/register', methods=['POST'])\ndef register():\n user = User.query.filter_by(username=request.json[\"username\"]).first()\n if user is not None:\n raise APIError(\"Username taken\")\n newUser = User(username=request.json[\"username\"])\n newUser.set_password(request.json[\"password\"])\n db.session.add(newUser)\n db.session.commit()\n return \"success\"\n\n@application.route('/api/csrf')\ndef return_csrf():\n token = generate_csrf()\n \n response = jsonify({\"detail\": \"CSRF token set\"})\n response.headers.set(\"X-CSRFToken\", token)\n return response\n \n\n@application.errorhandler(APIError)\ndef handle_exception(err):\n return json.dumps({\"message\": err.description}), 400\n\n@application.errorhandler(401)\ndef handle(err):\n return err\n\n@application.errorhandler(CSRFError)\ndef handle_csrf_error(err):\n if application.config['WTF_CSRF_FIELD_NAME'] not in session:\n print(\"not in session\")\n return json.dumps({\"message\": err.description}), 400","repo_name":"jsanchez98/Geonotes-back","sub_path":"App/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"11548689809","text":"#!/usr/bin/python3\n# -*- coding: iso-8859-15 -*-\n\nimport time\nimport keras\n\n# from keras.models import Sequential\n# from keras.layers import (\n# Activation, Conv2D, Dense,\n# Dropout, BatchNormalization, Flatten,\n# MaxPooling2D\n# )\nimport keras.backend.tensorflow_backend as K\n\nfrom autoda.standard_augmentation import apply_transform\n\nfrom autoda.networks.utils import (\n _update_history, get_input_shape,\n)\nfrom autoda.networks.architectures import ARCHITECTURES\n\n\ndef standard_objective_function(data, configuration=None, benchmark=\"AlexNet\", max_epochs=40, batch_size=512, time_budget=900):\n\n # preprocess data\n x_train, y_train, x_validation, y_validation, x_test, y_test, data_mean, data_variance = data\n\n input_shape = get_input_shape(x_train) # NWHC\n\n num_classes = y_train.shape[1]\n\n train_history, runtime = {}, []\n\n used_budget, num_epochs, duration_last_epoch = 0., 0, 0.\n num_datapoints, *_ = x_train.shape\n\n start_time = time.time()\n\n config = K.tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)\n session = K.tf.Session(config=config)\n K.set_session(session)\n\n assert benchmark in ARCHITECTURES\n # AlexNet\n network_function = ARCHITECTURES[benchmark]\n model = network_function(num_classes=num_classes, input_shape=input_shape)\n\n with K.tf.device(\"/gpu:1\"):\n with session.graph.as_default():\n opt = keras.optimizers.Adam(lr=0.0016681005372000575)\n\n # Let's train the model using RMSprop\n model.compile(loss='categorical_crossentropy',\n optimizer=opt,\n metrics=['accuracy'])\n\n while(num_epochs < max_epochs) and \\\n (used_budget + 1.11 * duration_last_epoch < time_budget):\n\n print(\"Using standard data augmentation.\")\n\n # Fit the model on the batches augmented data generated by apply transform\n history = model.fit_generator(\n apply_transform(\n x_train, y_train,\n data_mean, data_variance,\n batch_size=batch_size\n ),\n 
steps_per_epoch=num_datapoints // batch_size,\n epochs=num_epochs + 1,\n validation_data=(x_validation, y_validation),\n initial_epoch=num_epochs\n )\n\n train_history = _update_history(train_history, history.history)\n\n num_epochs += len(history.history.get(\"loss\", []))\n duration_last_epoch = (time.time() - start_time) - used_budget\n used_budget += duration_last_epoch\n print(\"used_budget\", used_budget, \"duration_last_epoch\", duration_last_epoch, \"time_budget\", time_budget)\n runtime.append(time.time() - start_time)\n\n validation_loss, validation_accuracy = model.evaluate(x_validation, y_validation, verbose=0)\n\n result = {\n \"validation_loss\": validation_loss,\n \"validation_error\": 1 - validation_accuracy,\n \"used_budget\": used_budget,\n \"train_history\": train_history,\n \"configs\": configuration\n }\n\n if configuration:\n result[\"configs\"] = configuration.get_dictionary()\n else:\n result[\"configs\"] = {}\n\n return result\n\n","repo_name":"zemovi/AutoDA","sub_path":"autoda/networks/train_standard.py","file_name":"train_standard.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"} +{"seq_id":"18984909664","text":"from flask import Flask, send_file, jsonify\nimport requests, subprocess\napp = Flask(__name__)\n\n@app.route('/', methods=['GET'])\ndef index():\n return jsonify({'success': 'false',\n 'message': 'please PUT the image binary or uri'})\n\n@app.route('/canary')\ndef canary():\n return jsonify({'success': 'true',\n 'message': 'HTTP 200. All ok.'})\n\n@app.route('/', methods=['PUT'])\ndef upload_image():\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n flash('No image selected for uploading')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n #print('upload_image filename: ' + filename)\n flash('Image successfully uploaded and displayed')\n return render_template('upload.html', filename=filename)\n else:\n flash('Allowed image types are -> png, jpg, jpeg, gif')\n return redirect(request.url)\n\n\n@app.route('/testimagedl', methods=['GET'])\ndef create_record():\n sampleimageurl='https://gordon.byers.me/assets/img/die-bart-die.png'\n imagefile = requests.get(sampleimageurl)\n open('/tmp/localimage.png', 'wb').write(imagefile.content)\n\n return send_file('/tmp/localimage.png', mimetype='image/png')\n\n@app.route('/testimagefnet', methods=['GET'])\ndef testimage_fnet():\n print('Fnet image proc')\n sampleimageurl='https://gordon.byers.me/assets/img/die-bart-die.png'\n imagefile = requests.get(sampleimageurl)\n open('/tmp/localimage.png', 'wb').write(imagefile.content)\n\n fnetpath='/opt/miniconda/bin/fnet' #'./home/admingeneric/.local/bin/fnet' #/opt/miniconda/bin/fnet\n print(['Using fnetpath:', fnetpath])\n\n modelpath=''\n path_save_dir=''\n filepath=''\n gpu_id=''\n JSON=''\n\n print(fnetpath, '-h')\n out = subprocess.check_output([fnetpath, 'train', '--json', '/tmp/train_options.json'])\n print([fnetpath, 'predict','--path_model_dir',modelpath,'--json', JSON,'--path_save_dir',path_save_dir,'--path_tif',filepath,'--gpu_ids', gpu_id, '--no_signal'])\n\n return send_file('/tmp/localimage.png', mimetype='image/png')\n\nif __name__ == \"__main__\": \n app.run(host = '0.0.0.0', port = 5001, debug = True) 
\n","repo_name":"Gordonby/pythongettingstarted","sub_path":"fnet/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"14031435990","text":"file_a = open(\"06.05 CompareFileA.txt\", \"r\")\nfile_b = open(\"06.05 CompareFileB.txt\", \"r\")\n\ndiff_count = 0\nfor line_num, (line_a, line_b) in enumerate(zip(file_a, file_b), start=1):\n if line_a != line_b:\n print(f\"Line: {line_num} - File A: {line_a.strip()}\\nLine: {line_num} - File B: {line_b.strip()}\\n\")\n diff_count += 1\n\nprint(f\"{diff_count} differences\")\n\nfile_a.close()\nfile_b.close()","repo_name":"jjohnson360/IFSC1202","sub_path":"06.05 Compare Files.py","file_name":"06.05 Compare Files.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"8123346113","text":"# This is a sample Python script.\n\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\nimport random\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom ransac import ransac\n\n\ndef random_data(num_size):\n x = np.linspace(0, 10, num_size)\n y = 3 * x + 10\n\n random_x = []\n random_y = []\n\n for i in range(num_size):\n random_x.append(x[i] + random.uniform(-0.5, 0.5))\n random_y.append(y[i] + random.uniform(-0.5, 0.5))\n\n for i in range(num_size):\n random_x.append(random.uniform(0, 10))\n random_y.append(random.uniform(10, 40))\n\n random_x = np.array(random_x)\n random_y = np.array(random_y)\n\n return random_x, random_y\n\n\nif __name__ == '__main__':\n num_size = 100\n random_x, random_y = random_data(num_size=num_size)\n a, b = ransac(random_x, random_y,\n iters=10000,\n sigma=0.25,\n P=0.99)\n y_estimate = a * random_x + b\n\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 1, 1)\n ax1.scatter(random_x, random_y)\n ax1.plot(random_x, y_estimate)\n ax1.set_xlabel('x')\n ax1.set_ylabel('y')\n plt.show()\n","repo_name":"combofish/chips-get","sub_path":"Python3/Algorithm/RANSAC/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"29"} +{"seq_id":"19628322616","text":"from brian2 import *\nimport networkx as nx\nimport numpy as np\nimport math\nimport cmath\nimport csv\nimport time as TIME\nimport matplotlib.pyplot as plt\nimport sys\nimport cPickle as pickle\nimport thorns as th\nimport pyspike as spk\n\nmatrix = sys.argv[1]\ntrain = pickle.load(open(\"storeddata/\" + matrix + \"-train.p\", \"rb\"))\nN = len(train)\nduration = len(train[0])\n\nprint(\"--- plotting ---\")\n\n# create list of spike times from raster\nspiketimes = []\nfor n in range(len(train)):\n spiketimes.append([0])\n for t in range(len(train[n])):\n if (train[n][t] >= 0):\n spiketimes[n].append(t)\n\nst = th.make_trains(spiketimes)\n\nsimulation = plt.figure(figsize=(17,10))\nsimulation.add_subplot(2,1,1)\nth.plot_raster(st, markersize=2.5)\n\n\n# histogram for spikes\nsimulation.add_subplot(2,1,2)\nbin = 10000\nspikeHist = th.psth(st, bin)\n# spikeHist = th.psth(st, duration/1000)\nplt.plot(spikeHist[0])\nsmoothHist = np.convolve(spikeHist[0],1)\n# peaks = np.r_[False, smoothHist[1:] > smoothHist[:-1]] & numpy.r_[smoothHist[:-1] > smoothHist[1:], False]\npeaks = np.r_[False, spikeHist[0][1:] > spikeHist[0][:-1]] & numpy.r_[spikeHist[0][:-1] 
> spikeHist[0][1:], False]\nmaxs = []\nfor i in range(len(peaks)):\n if (peaks[i] == True):\n maxs.append(spikeHist[0][i])\n else:\n maxs.append(-0.001)\n\n\nplt.plot(maxs, linewidth=0, marker='o')\n# plt.plot(maxs, linestyle=\"\", marker='o', markersize=0.7)\n\nsys.stdout.write('\\x1b[1A')\nsys.stdout.write('\\x1b[2K')\nplt.ylabel(\"PST histogram\")\n# plt.xlim(xmin= 0, xmax=duration/1000)\nplt.xlim(xmin=0, xmax=len(spikeHist[0]))\nplt.ylim(ymin=0)\nplt.xticks([])\n\n\n\nplt.show(block=False)\n\n# savesim = raw_input(\"save simulation? \")\n# if(savesim == 'y'):\n# simname = raw_input(\"Simulation name: \")\n# if(simname == ''):\n# simname = matrix\n# plt.savefig('../simulation_files/SPIKE/' + simname + \"-spikehist\" + '.png')\n\n\nsimname = matrix\nplt.savefig('../simulation_files/SPIKE/' + simname + \"-spikehist\" + '.png')\n\nplt.close()\n","repo_name":"markagrios/research","sub_path":"code/SPIKE/plotspikehist.py","file_name":"plotspikehist.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"16963003442","text":"import math\nimport opendis.RangeCoordinates\n\nfrom lib.entity import entityManager\n\nfrom opendis.RangeCoordinates import rad2deg, deg2rad\nfrom opendis.dis7 import Vector3Float, Vector3Double, EulerAngles\n\n__author__ = \"EnriqueMoran\"\n\nclass CinematicManager:\n\n def __init__(self):\n orientation = EulerAngles() # Set valid 0, 0, 0 orientation\n orientation.psi = -3.141592653489793\n orientation.theta = -1.5707963266948965\n orientation.phi = 3.141592653589793\n entityManager.EntityManager().set_entity_orientation(orientation)\n\n def get_information(self):\n \"\"\"TBD\n Note that X, Y, Z can't be 0,0,0.\n \"\"\"\n gps = opendis.RangeCoordinates.GPS()\n current_lat, current_lon, current_alt = self.get_lat_lon_alt()\n heading = self.get_heading()\n speed = self.get_speed()\n return f\"current position: {current_lat}, {current_lon}\\n\" +\\\n f\"current altitude: {current_alt} m\\n\" +\\\n f\"heading: {heading} degrees\\n\" +\\\n f\"speed: {speed} m/s\"\n \n def set_position(self, lat, lon, alt=None):\n \"\"\"\n Set current position.\n \n :param lat: Latitude in decimal degrees\n :param lon: Longitude in decimal degrees\n :param alt: Altitude in meters\n \"\"\"\n alt = self.get_lat_lon_alt()[2] if alt is None else alt\n\n gps = opendis.RangeCoordinates.GPS()\n location = Vector3Float() \n location.x, location.y, location.z = gps.lla2ecef([deg2rad(lat), deg2rad(lon), alt])\n entityManager.EntityManager().set_entity_location(location)\n\n def set_speed(self, speed):\n \"\"\"\"\n Set speed.\n\n :param speed: Speed in meters per second.\n \"\"\"\n heading_rad = deg2rad(self.get_heading())\n pitch_rad = deg2rad(self.get_roll_pitch_yaw()[1])\n velocity = Vector3Float()\n velocity.x = speed * math.cos(heading_rad) * math.cos(pitch_rad)\n velocity.y = speed * math.sin(heading_rad) * math.cos(pitch_rad)\n velocity.z = speed * math.sin(pitch_rad)\n entityManager.EntityManager().set_entity_linear_velocity(velocity)\n \n def set_heading(self, heading):\n \"\"\"\n Set heading.\n\n :param heading: Heading in degrees.\n \"\"\"\n gps = opendis.RangeCoordinates.GPS()\n\n location = entityManager.EntityManager().get_entity_location()\n orientation = entityManager.EntityManager().get_entity_orientation()\n lat, lon, alt, roll, pitch, _ = gps.ecef2llarpy(location.x, location.y, location.z,\n orientation.psi, orientation.theta,\n orientation.phi)\n yaw = deg2rad(heading)\n 
orientation.psi, orientation.theta, orientation.phi = gps.llarpy2ecef(lat, lon, alt,\n roll, pitch, yaw)[3:]\n entityManager.EntityManager().set_entity_orientation(orientation)\n\n def get_lat_lon_alt(self):\n \"\"\"Return a vector containing lat, lon and altitude in decimal degrees.\n Note that X, Y, Z can't be 0,0,0.\"\"\"\n gps = opendis.RangeCoordinates.GPS()\n location = entityManager.EntityManager().get_entity_location()\n return gps.ecef2lla([location.x, location.y, location.z])\n \n def get_heading(self):\n \"\"\"Return heading in degrees.\"\"\"\n gps = opendis.RangeCoordinates.GPS()\n location = entityManager.EntityManager().get_entity_location()\n orientation = entityManager.EntityManager().get_entity_orientation()\n yaw = gps.ecef2llarpy(location.x, location.y, location.z,\n orientation.psi, orientation.theta, orientation.phi)[5]\n heading = rad2deg(yaw)\n heading = heading if heading >= 0 else 360 + heading\n return heading\n\n def get_roll_pitch_yaw(self):\n \"\"\"Return a vector containing roll, pitch and yaw in decimal degrees.\n Note that X, Y, Z can't be 0,0,0.\"\"\"\n gps = opendis.RangeCoordinates.GPS()\n location = entityManager.EntityManager().get_entity_location()\n orientation = entityManager.EntityManager().get_entity_orientation()\n roll, pitch, yaw = gps.ecef2llarpy(location.x, location.y, location.z,\n orientation.psi, orientation.theta, orientation.phi)[3:]\n return [rad2deg(roll), rad2deg(pitch), rad2deg(yaw)]\n\n def get_speed(self):\n \"\"\"Returns speed in m/s.\"\"\"\n speed = entityManager.EntityManager().get_entity_linear_velocity()\n return math.sqrt(speed.x**2 + speed.y**2 + speed.z**2)\n\n def process_cinematics(self, dt):\n \"\"\"\n :param dt: Time elapsed since last update in seconds.\n Does not calculate new Altitude.\n\n Return traveled distance in meters.\n \"\"\"\n lat_d, lon_d, alt = self.get_lat_lon_alt()\n current_lat = deg2rad(lat_d)\n current_lon = deg2rad(lon_d)\n heading = deg2rad(self.get_heading())\n speed = self.get_speed()\n\n distance = deg2rad((speed * dt) / (opendis.RangeCoordinates.WGS84().a))\n new_lat = math.asin(math.sin(current_lat) * math.cos(distance) + math.cos(current_lat) * \\\n math.sin(distance) * math.cos(heading))\n new_lon = current_lon + math.atan2(math.sin(heading) * math.sin(distance) * \\\n math.cos(current_lat), math.cos(distance) - \\\n math.sin(current_lat) * math.sin(new_lat))\n\n gps = opendis.RangeCoordinates.GPS()\n location = Vector3Double() \n location.x, location.y, location.z = gps.lla2ecef([rad2deg(new_lat), rad2deg(new_lon), alt])\n entityManager.EntityManager().set_entity_location(location)\n\n return (speed * dt)","repo_name":"EnriqueMoran/dis","sub_path":"TankSimulator/src/lib/cinematic/cinematicManager.py","file_name":"cinematicManager.py","file_ext":"py","file_size_in_byte":5913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"11875656613","text":"from django.shortcuts import render\n\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom payments import get_payment_model, RedirectNeeded\n\n#! 
/usr/bin/env python3.6\n\"\"\"\nPython 3.6 or newer required.\n\"\"\"\nimport json\nimport os\nimport stripe\n# This is your real test secret API key.\nstripe.api_key = \"sk_test_51HpdyPKezNXTk9UoXGAemFR4zqCjVXuua66dsYZ9gjP8QPnwFK3ESOuKXtIkN0aFBt2ohbAWRxLfrtWAxchSVqDy00QWpdcje4\"\n\nfrom flask import Flask, render_template, jsonify, request\n\n\napp = Flask(__name__, static_folder=\".\",\n            static_url_path=\"\", template_folder=\".\")\n\n\ndef calculate_order_amount(items):\n    # Replace this constant with a calculation of the order's amount\n    # Calculate the order total on the server to prevent\n    # people from directly manipulating the amount on the client\n    return 1400\n\n\n@app.route('/create-payment-intent', methods=['POST'])\ndef create_payment():\n    try:\n        data = json.loads(request.data)\n        intent = stripe.PaymentIntent.create(\n            amount=calculate_order_amount(data['items']),\n            currency='usd'\n        )\n\n        return jsonify({\n            'clientSecret': intent['client_secret']\n        })\n    except Exception as e:\n        return jsonify(error=str(e)), 403\n\nif __name__ == '__main__':\n    app.run()\n\n\ndef payments(request):\n    return render(request, 'essietproject/payments/payment.html')\n\ndef payment_details(request, payment_id):\n    payment = get_object_or_404(get_payment_model(), id=payment_id)\n    try:\n        form = payment.get_form(data=request.POST or None)\n    except RedirectNeeded as redirect_to:\n        return redirect(str(redirect_to))\n    return TemplateResponse(request, 'payment.html',{'form': form, 'payment': payment})","repo_name":"fritzjackhunt/essietProjects","sub_path":"project1/paymentapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"24800436897","text":"# Matrix Chain Multiplication\n# Given a chain of matrices A1, A2, A3,.....An, you have to figure out the most efficient way to multiply these matrices i.e. determine where to place parentheses to minimise the number of multiplications.\n# You will be given an array p[] of size n + 1. Dimension of matrix Ai is p[i - 1]*p[i]. You need to find minimum number of multiplications needed to multiply the chain.\n# Input Format :\n# Line 1 : Integer n i.e. number of matrices\n# Line 2 : n + 1 integers i.e. 
elements of array p[] \n# Output Format :\n# Line 1 : Minimum number of multiplication needed\n# Constraints :\n# 1 <= n <= 100\n# Sample Input 1 :\n# 3\n# 10 15 20 25\n# Sample Output :\n# 8000\n# Sample Output Explanation :\n# There are two ways to multiply the chain - A1*(A2*A3) or (A1*A2)*A3.\n# If multiply in order A1*(A2*A3) then number of multiplications required are 15000.\n# If multiply in order (A1*A2)*A3 then number of multiplications required are 8000.\n# Thus minimum number of multiplications required are 8000\n\nimport sys\n\ndef mcm(p, i, j, dp):\n\n if i == j:\n return 0\n\n min_val = sys.maxsize\n for k in range(i, j):\n if dp[i][k] == -1:\n ans1 = mcm(p, i, k, dp)\n dp[i][k] = ans1\n else:\n ans1 = dp[i][k]\n \n if dp[k+1][j] == -1:\n ans2 = mcm(p, k+1, j, dp)\n dp[k+1][j] = ans2\n else:\n ans2 = dp[k+1][j]\n \n \n mCost = p[i-1]*p[k]*p[j]\n curr_val = ans1 + ans2 + mCost\n min_val = min(min_val, curr_val)\n \n return min_val\n\n\nn = int(input())\np = [int(i) for i in input().split()]\ndp = [[-1 for j in range(n+1)] for i in range(n+1)]\n\nprint(mcm(p, 1, n, dp))\n","repo_name":"visheshdvn/DSA","sub_path":"DSA/18DynamicProg/DP2/MCM.py","file_name":"MCM.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"} +{"seq_id":"5938807712","text":"# -*- coding: utf-8 -*-\n\nfrom openerp import models, fields, api\nimport logging\n_logger = logging.getLogger(__name__)\n\n\nclass website_seller(models.Model):\n\t_name = 'website.best.sellers'\n\n\tcategory = fields.Char(required=True,string='Category Name')\n\tcategory_id = fields.Many2one(\"product.public.category\",string=\"Category ID\")\n\tproduct_id = fields.Many2many(\"product.template\",string=\"Product ID\")\n# class website_best_sellers(models.Model):\n# _name = 'website_best_sellers.website_best_sellers'\n\n# name = fields.Char()\n\t@api.onchange('category_id')\n\tdef on_change_category_id(self):\n\t\t_logger.warning(self.category_id)\n\t\t# if self.category_id:\n\t\tproducters = self.env['product.template'].search([('public_categ_ids','in',self.category_id.id)])\n\t\tproduct_ids = [producter.id for producter in producters]\n\t\t_logger.warning(product_ids)\n\t\tres = {\n\t\t 'domain': {\n\t\t 'product_id': [('id', 'in', product_ids)],\n\t\t }\n\t\t }\n\t\treturn res","repo_name":"jansumanpan/website_best_seller","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"70610856080","text":"# -*- coding: utf-8 -*-\nimport logging\nimport os\nimport re\nimport sys\n\n\nclass Helpers:\n def __init__(self, logger=None):\n if logger is None:\n self.logger = logging.getLogger(__name__)\n else:\n self.logger = logger\n\n @staticmethod\n def extract_sequence_num(filename):\n sequence_num = re.search(\n '([0-9]+)[^0-9].+',\n os.path.basename(filename)\n ).group(1)\n\n return int(sequence_num)\n\n def append_migration(self, migrations, filename):\n try:\n migrations.append((self.extract_sequence_num(filename), filename))\n except AttributeError:\n self.logger.error(\"Invalid filename found: {}\".format(filename))\n sys.exit(1)\n\n def find_migrations(self, sql_directory):\n migrations = []\n for filename in os.listdir(sql_directory):\n if filename.endswith(\".sql\"):\n self.append_migration(\n migrations,\n str(os.path.join(sql_directory, filename))\n )\n return migrations\n\n @staticmethod\n def 
sort_migrations(migrations):\n if (\n all(isinstance(tup, tuple) for tup in migrations) and\n all(isinstance(tup[0], int) for tup in migrations) and\n all(isinstance(tup[1], str) for tup in migrations)\n ):\n migrations.sort(key=lambda tup: tup[0])\n else:\n raise TypeError(\n \"Migrations list did not contain only tuple(int, str)\")\n\n def populate_migrations(self, sql_directory):\n migrations = self.find_migrations(sql_directory)\n self.sort_migrations(migrations)\n return migrations\n\n @staticmethod\n def get_unprocessed_migrations(db_version, migrations):\n return [tup for tup in migrations if tup[0] > int(db_version)]\n","repo_name":"beveradb/migration_runner","sub_path":"migration_runner/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"3975255739","text":"from distutils.core import setup, Extension\r\n\r\nmodule = Extension('arbres', sources=['module.cpp'], libraries=[\"user32\", \"gdi32\"])\r\n\r\nsetup(name='arbres',\r\n version='1.0',\r\n description=\"Module d'affichage d'arbres\",\r\n long_description=\"Module permettant d'afficher à l'écran un signe, un chiffre, d'obtenir la taille de l'écran, de faire un cercle ou un trait.\",\r\n author=\"reza0310\",\r\n author_email=\"reza031077@yahoo.fr\",\r\n ext_modules=[module])","repo_name":"reza0310/NSI","sub_path":"arbres/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"22156459939","text":"\"\"\"\nThis script is the entry point for the Biometrics Tracking application. It attempts to open a JSON formatted\nfile which contains the application's configuration info. If this file is not found, which will happen the first\ntime the application is invoked, the config.createconfig.ConfigGUI Window will be presented to allow configuration\nparameters to be set. 
If the import succeeds, the gui.Application Window will be presented.\n\"\"\"\nimport argparse\nimport json\nimport logging\nimport logging.config\nimport os\nimport pathlib\nimport sys\n\nimport ttkbootstrap as ttkb\nfrom ttkbootstrap.constants import *\nfrom typing import Optional\n\nimport biometrics_tracker.config.createconfig as config\nimport biometrics_tracker.ipc.messages as messages\nimport biometrics_tracker.ipc.queue_manager as queues\nimport biometrics_tracker.config.json_handler as jh\nimport biometrics_tracker.model.datapoints as dp\nimport biometrics_tracker.model.persistence as per\nimport biometrics_tracker.main.scheduler as sched\nimport biometrics_tracker.gui.tk_gui as gui\n\n\nclass ErrorDialog(ttkb.Window):\n    \"\"\"\n    Used to present error messages to the user\n    \"\"\"\n    def __init__(self, message: str):\n        \"\"\"\n        Creates an instance of ErrorDialog\n\n        :param message: the message to be displayed\n        :type message: str\n\n        \"\"\"\n        ttkb.Window.__init__(self, title='Biometrics Tracker Setup', themename='darkly')\n        ttkb.Label(self, text=message).grid(row=0, column=0, padx=5, pady=5, sticky=NW)\n        ttkb.Button(self, text=\"OK\", command=self.close, default='normal').grid(row=1, column=0, padx=5, pady=5,\n                                                                                sticky=NE)\n        self.grid()\n        self.mainloop()\n\n    def close(self):\n        \"\"\"\n        Close the error message GUI\n\n        :return: None\n\n        \"\"\"\n        self.quit()\n        self.withdraw()\n\n\nclass Launcher:\n    \"\"\"\n    Launches the Biometrics Tracker app, or if this is the first-time invocation, prompt for configuration info,\n    then start the application\n    \"\"\"\n    def __init__(self, queue_mgr: queues.Queues):\n        \"\"\"\n        Create an instance of Launcher\n\n        :param queue_mgr: the queue manager object\n        :type queue_mgr: biometrics_tracker.ipc.queue_manager.Queues\n\n        \"\"\"\n        self.queue_mgr = queue_mgr\n        self.db: Optional[per.DataBase] = None\n        self.person: Optional[dp.Person] = None\n        self.args: Optional[argparse.Namespace] = None\n\n    def start_database(self, filename: str):\n        \"\"\"\n        Start the database thread\n\n        :param filename: The file name of the SQLite3 database\n        :type filename: str\n        :return: None\n\n        \"\"\"\n        self.db = per.DataBase(filename, self.queue_mgr, block_req_queue=True)\n        self.db.start()\n\n    def pre_launch(self, config_path: pathlib.Path) -> config.ConfigInfo:\n        with config_path.open(mode='tr', encoding='UTF-8') as cfg:\n            config_json = cfg.read()\n        config_info: config.ConfigInfo = json.loads(eval(config_json), object_hook=jh.config_object_hook)\n        logging.config.dictConfig(config_info.logging_config)\n        if self.db is None:\n            self.start_database(pathlib.Path(config_info.db_dir_path, 'biometrics.db').__str__())\n        return config_info\n\n    def launch_gui(self, config_path: pathlib.Path) -> bool:\n        \"\"\"\n        Launch the Biometric Tracker GUI\n\n        :param config_path: a pathlib object connected to the configuration file\n        :type config_path: pathlib.Path\n        :return: quit flag\n        :rtype: bool\n\n        \"\"\"\n        config_info = self.pre_launch(config_path)\n        app = gui.Application(config_info, self.queue_mgr)\n        app.mainloop()\n        app.destroy()\n        if app.dispatcher is not None:\n            app.dispatcher.join()\n        self.queue_mgr.send_db_req_msg(messages.CloseDataBaseReqMsg(destination=per.DataBase, replyto=None))\n        return True\n\n    def launch_config(self, homepath: pathlib.Path) -> bool:\n        \"\"\"\n        Launch a GUI to prompt the user for configuration info\n\n        :param homepath: a pathlib object associated with the user's home directory\n        :type homepath: pathlib.Path\n        :return: quit flag\n        :rtype: bool\n\n        \"\"\"\n        app_dir_path: 
pathlib.Path = pathlib.Path(homepath, 'biometrics-tracker')\n        config_gui = config.ConfigGUI(app_dir_path)\n        config_info = config_gui.ask_config()\n        quit: bool = True\n        if config_info.db_dir_path is not None:\n            self.start_database(config_info.db_dir_path.name)\n            self.queue_mgr.send_db_req_msg(messages.CreateDataBaseReqMsg(destination=per.DataBase,\n                                                                         replyto=None))\n            msg: messages.CompletionMsg = self.queue_mgr.check_completion_queue(block=True)\n            self.queue_mgr.send_db_req_msg(messages.CloseDataBaseReqMsg(destination=per.DataBase,\n                                                                        replyto=None))\n            if not msg.status == messages.Completion.SUCCESS:\n                ErrorDialog(message='Database creation failed. The Biometrics Tracker can not be started.')\n                quit = True\n        else:\n            quit = True\n        return quit\n\n    def launch_scheduler(self, config_path: pathlib.Path) -> bool:\n        \"\"\"\n        Launch the Biometrics Tracker Scheduler\n\n        :param config_path: a Path object pointing to the configuration file\n        :type config_path: pathlib.Path\n        :return: quit flag\n        :rtype: bool\n\n        \"\"\"\n        config_info: config.ConfigInfo = self.pre_launch(config_path)\n        scheduler = sched.Scheduler(config_info, self.queue_mgr, start_dispatcher=True)\n        scheduler.start()\n        return True\n\n    def launch_scheduled_entry(self, config_path: pathlib.Path) -> bool:\n        \"\"\"\n        Launch the Biometrics Tracker Scheduled Entry GUI\n\n        :param config_path: a pathlib object connected to the configuration file\n        :type config_path: pathlib.Path\n        :return: quit flag\n        :rtype: bool\n\n        \"\"\"\n        def retrieve_schedule(msg: messages.ScheduleEntriesMsg):\n            schedule = msg.entries[0]\n            app = gui.ScheduledEntryWindow(config_info=config_info, queue_mgr=self.queue_mgr,\n                                           schedule=schedule, person=self.person)\n            app.mainloop()\n            app.destroy()\n            self.queue_mgr.send_db_req_msg(messages.CloseDataBaseReqMsg(destination=per.DataBase, replyto=None))\n\n        def retrieve_person(msg: messages.PersonMsg):\n            self.person = msg.payload\n            self.queue_mgr.send_db_req_msg(messages.ScheduleEntryReqMsg(destination=per.DataBase,\n                                                                        replyto=retrieve_schedule,\n                                                                        person_id=self.person.id,\n                                                                        seq_nbr=self.args.seq[0],\n                                                                        last_triggered=None,\n                                                                        operation=messages.DBOperation.RETRIEVE_SINGLE))\n\n        config_info = self.pre_launch(config_path)\n        self.queue_mgr.send_db_req_msg(messages.PersonReqMsg(destination=per.DataBase,\n                                                             replyto=retrieve_person,\n                                                             person_id=self.args.id[0],\n                                                             operation=messages.DBOperation.RETRIEVE_SINGLE))\n        done: bool = False\n        while not done:\n            msg = self.queue_mgr.check_db_resp_queue(block=True)\n            if msg is not None:\n                msg.destination(msg)\n                if msg.__class__ == messages.ScheduleEntriesMsg:\n                    done = True\n        return True\n\n\ndef launch():\n    \"\"\"\n    Check for a configuration file. If one is found, launch the Biometrics Tracking GUI or Scheduler or Config GUI,\n    according to the command line argument. 
If not, launch the Config GUI\n\n \"\"\"\n\n quit: bool = False\n if sys.platform[0:3] == 'win':\n home_str = os.environ['HOMEPATH']\n else:\n home_str = os.environ['HOME']\n homepath: pathlib.Path = pathlib.Path(home_str)\n queue_mgr = queues.Queues(sleep_seconds=.5)\n launcher = Launcher(queue_mgr)\n while not quit:\n config_path: pathlib.Path = pathlib.Path(homepath, os.sep.join(['biometrics-tracker', 'config',\n 'config_info.json']))\n if config_path.exists():\n arg_parser = argparse.ArgumentParser(prog='biotrack')\n sub_parser = arg_parser.add_subparsers(title='commands', dest='subcmd',\n metavar='config | gui | scheduler | scheduled-entry',\n help='config = intitial configuration, gui = start application GUI,'\n ' scheduler = start scheduler process, scheduled-entry = '\n 'initiate Scheduled Entry session',\n required=True)\n\n config_parser = sub_parser.add_parser('config')\n config_parser.set_defaults(func=lambda hp=homepath: launcher.launch_config(homepath=hp))\n scheduler_parser = sub_parser.add_parser('scheduler')\n scheduler_parser.set_defaults(func=lambda cp=config_path: launcher.launch_scheduler(config_path=cp))\n sched_entry_parser = sub_parser.add_parser('scheduled-entry')\n sched_entry_parser.set_defaults(func=lambda cp=config_path: launcher.launch_scheduled_entry(config_path=cp))\n sched_entry_parser.add_argument(dest='id', type=str, nargs=1,\n help='Person ID')\n sched_entry_parser.add_argument(dest='seq', type=int, nargs=1,\n help='Schedule sequence number')\n gui_parser = sub_parser.add_parser('gui')\n gui_parser.set_defaults(func=lambda cp=config_path: launcher.launch_gui(config_path=cp))\n\n launcher.args = arg_parser.parse_args()\n quit = launcher.args.func()\n\n else:\n quit = launcher.launch_config(homepath)\n\n sys.exit()\n\n\nif __name__ == '__main__':\n launch()\n","repo_name":"stroudcuster/biometrics-tracker","sub_path":"biometrics_tracker/main/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"30046108964","text":"import threading\nimport driver.database as db\nimport time\nimport pairing\nimport driver.announce as an\n\n# provide threaded framework for pairing procedure\n\nblocked = False\nsem_pairing_start = threading.Semaphore(0)\nsem_access_blocked = threading.Semaphore(1)\n\n#thread-safe function to read blocked variable\ndef get_blocked():\n sem_access_blocked.acquire()\n bl = blocked\n sem_access_blocked.release()\n return bl\n\n#thread-safe function to write blocked variable\ndef set_blocked(bval):\n global blocked\n sem_access_blocked.acquire()\n blocked = bval\n sem_access_blocked.release()\n\n#request pairing procedure to start\ndef request_pairing():\n if(get_blocked()):\n return False\n sem_pairing_start.release()\n return True\n\n# check if initial pairing becomes necessary during runtime\n# called as thread\ndef check_initial_necessary():\n dbcon = db.DB()\n while(True):\n if (dbcon.num_users() == 0) and get_blocked() == False:\n request_pairing()\n time.sleep(1)\n\n# main loop that waits for any condition that requires pairing\n# to start\ndef pairing_loop():\n while(True):\n dbcon = db.DB()\n sem_pairing_start.acquire() # wait for request\n print(\"pairing started!\")\n set_blocked(True) # signal that pairing is running\n initial = dbcon.num_users() == 0 # find out if initial is necessary\n if(initial):\n an.disannounce() # if initial is necessary, disannounce service\n p_result = pairing.pair(dbcon.get_uuid(), 
initial)\n        if(initial):\n            an.announce() # if initial, reannounce service\n        if(p_result[0]):\n            # store new client to db\n            dbcon.insert_user(str(p_result[1]), p_result[3], p_result[2][0], p_result[2][1], p_result[2][2])\n        set_blocked(False) # signal that pairing is free to restart\n\n# init function, creates threads\ndef init():\n    pth = threading.Thread(target=pairing_loop)\n    pth.daemon = True\n    pth.start()\n\n    wth = threading.Thread(target=check_initial_necessary)\n    wth.daemon = True\n    wth.start()\n\n","repo_name":"seemoo-lab/wisec23-speaker-bootstrapping","sub_path":"server/driver/pairing_thread.py","file_name":"pairing_thread.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"6854346683","text":"from DIRAC.ResourceStatusSystem.Policy.Configurations import POLICIESMETA as DIRACPOLICIESMETA\n\n__RCSID__ = '$Id: $'\n\nPOLICIESMETA = DIRACPOLICIESMETA\n\nBESPOLICIESMETA = {\n  'SiteSAM' : {\n    'description' : 'Policy based on site SAM information',\n    'module' : 'SiteSAMPolicy',\n    'command' : ( 'SAMCommand', 'SAMCommand' ),\n    'args' : None\n  }\n}\n\nPOLICIESMETA.update( BESPOLICIESMETA )\n","repo_name":"besdiracgrid/IHEPDIRAC","sub_path":"ResourceStatusSystem/Policy/Configurations.py","file_name":"Configurations.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"2372965230","text":"import time\nfrom llama_cpp import Llama, llama_cpp\nimport numpy as np\nmodel_path = \"ggml-model-q4_0.bin\"\nllm = Llama(model_path=model_path, logits_all=True, n_batch=1, n_threads=4)\nnvocab = llama_cpp.llama_n_vocab(llm.ctx)\n\n\ndef predict_old(query: str, max_tokens: int = 1, top_k: int = 10):\n    # Encode the query using the bi-encoder and find potentially relevant passages\n    start_time = time.time()\n\n    llm.reset()\n    tokens = llm.tokenize(query.encode())\n    ntokens = 1\n\n    rs = []\n    for i in range(max_tokens):\n        llm.eval(tokens)\n        x = llama_cpp.llama_get_logits(llm.ctx)\n        X = np.ctypeslib.as_array(x, shape=(ntokens, nvocab,)).copy()\n        topktokens = np.argsort(X[0, :])[-top_k:][::-1]\n        tokens.append(topktokens[0])\n        topklogits = X[0, topktokens]\n        strings = [llm.detokenize([t]).decode() for t in topktokens]\n        r = [{\"string\": s, \"prob\": p} for s, p in zip(strings, topklogits)]\n        rs.append(r)\n        print(r)\n\n    end_time = time.time()\n\n    print(f\"Elapsed time: {end_time - start_time:.2f} seconds\")\n    print(\"\\n\\n========\\n\")\n\n    return rs\n\n\ndef predict(query: str, max_tokens: int, top_k: int):\n    # Encode the query using the bi-encoder and find potentially relevant passages\n    start_time = time.time()\n    for k in range(1, top_k+1):\n        llm.reset()\n        tokens = llm.tokenize(query.encode())\n        ntokens = 1\n        llm.eval(tokens)\n        x = llama_cpp.llama_get_logits(llm.ctx)\n        X = np.ctypeslib.as_array(x, shape=(ntokens, nvocab,)).copy()\n        token0 = np.argsort(X[0, :])[-k]\n        tokens.append(token0)\n        completion_tokens = []\n        for t in llm.generate(tokens, top_k=1, top_p=0.95, temp=0, repeat_penalty=1.1):\n            completion_tokens.append(t)\n            if len(completion_tokens) >= max_tokens:\n                break\n        yield llm.detokenize([token0]+completion_tokens).decode()\n\n    end_time = time.time()\n\n    print(f\"Elapsed time: {end_time - start_time:.2f} seconds\")\n    print(\"\\n\\n========\\n\")\n\n\nif __name__ == \"__main__\":\n    # predict() is a generator and requires max_tokens and top_k; example values used here\n    for completion in predict(\"Cual es la capital de Argentina?\", max_tokens=8, top_k=3):\n        print(completion)\n","repo_name":"pabloriera/llmtopk","sub_path":"predict_llmcpp.py","file_name":"predict_llmcpp.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"32583731313","text":"import utils\nfrom database import database\nimport rest\nfrom stock_quote import stock_quote\nfrom logger import logger\n\n\nclass Tickers:\n    \"\"\"\n    Ticker object\n    \"\"\"\n\n    def __init__(self):\n        self.database = database()\n        self.stock_quote = stock_quote()\n        self.logger = logger('tickers')\n\n    def collect_and_send(self, ticker=None):\n        \"\"\"\n        Fetching ticker information to the server\n        \"\"\"\n        if not ticker:\n            for ticker in self.database.get_all_tickers():\n                ticker_data = self.collect(ticker)\n                if ticker_data and not self.send(ticker_data):\n                    return False\n            return True\n        else:\n            ticker_data = self.collect(ticker)\n            if ticker_data and self.send(ticker_data):\n                return True\n            else:\n                return False\n\n    def collect(self, ticker=None):\n        \"\"\"\n        Collecting ticker data\n        \"\"\"\n        if ticker:\n            ticker_consensus = self.database.get_consensus(ticker)\n            # ticker_yahoo = self.stock_quote.get_ticker_data(ticker)\n            name = ticker\n            long_name = ''\n            # long_name = ticker_yahoo['long_name']\n            # last_stock_price = ticker_yahoo['last_stock_price']\n            last_stock_price = 0\n            consensus_min = ticker_consensus['min']\n            consensus_avg = ticker_consensus['avg']\n            consensus_max = ticker_consensus['max']\n            slug = utils.slugify(ticker)\n\n            data = {\n                'name': name,\n                'long_name': long_name.replace('\"', ''),\n                'last_stock_price': last_stock_price,\n                'consensus_min': consensus_min,\n                'consensus_avg': consensus_avg,\n                'consensus_max': consensus_max,\n                'display': 0,\n                'slug': slug\n            }\n            return data\n        else:\n            return None\n\n    def send(self, data=None):\n        \"\"\"\n        Sending data to the API\n        \"\"\"\n\n        if data:\n            if rest.send(\"POST\", \"/api/tickers/\", data):\n                \"\"\"Trying to send POST\"\"\"\n                self.logger.debug(\"Ticker data create\")\n                return True\n            else:\n                if rest.send(\"PUT\", \"/api/tickers/\", data):\n                    \"\"\"Trying to send PUT\"\"\"\n                    self.logger.debug(\"Ticker data update\")\n                    return True\n                else:\n                    # Literally, something would be wrong on the front-end side, if this does not work\n                    self.logger.error(\"Ticker data update fail, nothing else to try\")\n                    return False\n        else:\n            return False\n","repo_name":"mnorkin/miniature-nemesis","sub_path":"model/tickers.py","file_name":"tickers.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"73932714319","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\"\nimport pandas as pd\nfrom collections import OrderedDict\n# from json import dump\n\n# Display settings\npd.set_option('max_columns', 20)\npd.set_option('max_rows', 50)\npd.set_option('line_width', 600)\npd.set_option('max_colwidth', 100)\npd.set_option('notebook_repr_html', False)\n\n\ndef get_unique_elements(series):\n\t\"\"\"\n\tExtraction lists in elements in Series\n\tRemoving duplicates from elements in Series\n\tSorting lists and return them\n\t[IN] pd.Series\n\t[OUT] list\n\t\"\"\"\n\ta = []\n\tfor item in series.tolist():\n\t\titem = item.split()\n\t\ta.append(item[0]) if (len(item) == 1) else [a.append(item[i]) for i in xrange(len(item))]\n\treturn sorted(list(set(a)))\n\n\ndef get_common_elements(dict1, dict2):\n\t\"\"\"\n\tConvert dicts to lists\n\tReturn common elements from input dicts\n\t[IN] dict, dict\n\t[OUT] 
list\n\t\"\"\"\n\tlist1 = list(dict1)[0]\n\tlist2 = list(dict2)[0]\n\treturn sorted(set(list1).intersection(list2))\n\n\ndf = pd.read_csv(\"/home/sergey/Dropbox/Coding/d3/ssuprunenko.github.io/data/ngpt-stations.csv\",\n\t\t\t\t\tsep=';', encoding='utf-8')\n\nuniq_okrugs = [u\"СВАО\", u\"ВАО\", u\"ЦАО\", u\"ЮВАО\", u\"ЮАО\", u\"ЮЗАО\", u\"ЗАО\", u\"СЗАО\", u\"САО\"]\n\ndistricts = (\n\t{\n\tu\"САО\": [(u\"Восточное Дегунино\",9), (u\"Дмитровский\",0), (u\"Бескудниковский\",0),\n\t\t\t (u\"Тимирязевский\",9), (u\"Аэропорт\",2), (u\"Савеловский\",9), (u\"Беговой\",2),\n\t\t\t (u\"Хорошевский\",7), (u\"Сокол\",2), (u\"Войковский\",2), (u\"Коптево\",2),\n\t\t\t (u\"Головинский\",2), (u\"Ховрино\",2), (u\"Левобережный\",2), (u\"Западное Дегунино\",2),\n\t\t\t (u\"Молжаниновский\",0),\n\t\t\t (u\"\",0)],\n\n\tu\"СВАО\":[(u\"Северное Медведково\",6), (u\"Южное Медведково\",6), (u\"Лосиноостровский\",6),\n\t\t\t (u\"Бабушкинский\",6), (u\"Ярославский\",6), (u\"Свиблово\",6), (u\"Ростокино\",13),\n\t\t\t (u\"Алексеевский\",6), (u\"Марьина роща\",10), (u\"Останкинский\",13), (u\"Бутырский\",9),\n\t\t\t (u\"Марфино\",13), (u\"Отрадное\",9), (u\"Алтуфьевский\",9), (u\"Бибирево\",9),\n\t\t\t (u\"Лианозово\",9), (u\"Северный\",0)],\n\n\tu\"ВАО\": [(u\"Северное Измайлово\",3), (u\"Восточный\",0), (u\"Измайлово\",3), (u\"Восточное Измайлово\",3),\n\t\t\t (u\"Ивановское\",0), (u\"Новокосино\",8), (u\"Косино-Ухтомский\",0),\n\t\t\t (u\"Вешняки\",7), (u\"Новогиреево\",8), (u\"Перово\",8), (u\"Соколиная гора\",3), (u\"Сокольники\",1),\n\t\t\t (u\"Богородское\",1), (u\"Преображенское\",1), (u\"Метрогородок\",0), (u\"Гольяново\",3),\n\t\t\t (u\"\",0)],\n\n\tu\"ЦАО\": [(u\"Мещанский\",10), (u\"Красносельский\",1), (u\"Басманный\",3), (u\"Таганский\",7),\n\t\t\t (u\"Замоскворечье\",2), (u\"Якиманка\",6), (u\"Хамовники\",1), (u\"Арбат\",4),\n\t\t\t (u\"Пресненский\",7), (u\"Тверской\",9),\n\t\t\t (u\"\",0),(u\"\",0),(u\"\",0),(u\"\",0),(u\"\",0),(u\"\",0),(u\"\",0)],\n\n\tu\"ЮВАО\":[(u\"Выхино-Жулебино\",7), (u\"Рязанский\",7), (u\"Капотня\",0), (u\"Братеево\",2),\n\t\t\t (u\"Некрасовка\",0), (u\"Марьино\",10), (u\"Люблино\",10), (u\"Текстильщики\",7),\n\t\t\t (u\"Кузьминки\",7), (u\"Нижегородский\",7), (u\"Лефортово\",8), (u\"Южнопортовый\",7),\n\t\t\t (u\"\",0),(u\"\",0),(u\"\",0),(u\"\",0),(u\"\",0)],\n\n\tu\"ЮАО\": [(u\"Нагатинский затон\",2), (u\"Нагатино-Садовники\",2), (u\"Даниловский\",2),\n\t\t\t (u\"Царицыно\",2), (u\"Москворечье-Сабурово\",2),\n\t\t\t (u\"Орехово-Борисово Северное\",2), (u\"Орехово-Борисово Южное\",2), (u\"Зябликово\",10),\n\t\t\t (u\"Бирюлево Восточное\",0), (u\"Братеево\",2), (u\"Бирюлево Западное\",9), (u\"Чертаново Южное\",9),\n\t\t\t (u\"Чертаново Центральное\",9), (u\"Чертаново Северное\",9), (u\"Нагорный\",9),\n\t\t\t (u\"Донской\",9),\n\t\t\t (u\"\",0)],\n\n\tu\"ЮЗАО\":[(u\"Гагаринский\",1), (u\"Академический\",6), (u\"Котловка\",9), (u\"Зюзино\",11),\n\t\t (u\"Северное Бутово\",12), (u\"Южное Бутово\",12), (u\"Ясенево\",6), (u\"Теплый Стан\",6),\n\t\t (u\"Обручевский\",6), (u\"Коньково\",6), (u\"Черемушки\",6), (u\"Ломоносовский\",1),\n\t\t (u\"\",0),(u\"\",0),(u\"\",0),(u\"\",0),(u\"\",0)],\n\n\tu\"ЗАО\": [(u\"Филевский парк\",4), (u\"Крылатское\",3), (u\"Дорогомилово\",4), (u\"Раменки\",1),\n\t\t (u\"Проспект Вернадского\",1), (u\"Внуково\",0), (u\"Ново-Переделкино\",0),\n\t\t (u\"Тропарево-Никулино\",1), (u\"Очаково-Матвеевское\",0), (u\"Солнцево\",0),\n\t\t (u\"Кунцево\",3), (u\"Можайский\",3), (u\"Фили-Давыдково\",4),\n\t\t 
(u\"\",0),(u\"\",0),(u\"\",0),(u\"\",0)],\n\n\tu\"СЗАО\":[(u\"Куркино\",0), (u\"Северное Тушино\",7), (u\"Южное Тушино\",7), (u\"Покровское-Стрешнево\",7),\n\t\t\t (u\"Хорошево-Мневники\",7), (u\"Щукино\",7), (u\"Строгино\",3), (u\"Митино\",3),\n\t\t\t (u\"\",0),(u\"\",0),(u\"\",0),(u\"\",0),(u\"\",0),(u\"\",0),(u\"\",0),(u\"\",0),(u\"\",0)]\n\t})\n\n\ncommon_matrix = []\ndata = []\nfor okrug in uniq_okrugs:\n\tfor district in districts[okrug]:\n\t\troutes = ({'okrug': okrug, 'district':district[0],\n\t\t\t\t 'routes':get_unique_elements(df[df.UPRAVA == district[0]].ROUTES),\n\t\t\t\t 'metro':district[1]})\n\t\tdata.append(routes)\n\ndata = pd.DataFrame(data, columns=['okrug', 'district', 'routes', 'metro'])\ndata.to_json('../data/districts.json',orient='records',force_ascii=False)\n\n\nfor okrug in uniq_okrugs:\n\tlocal_matrix = []\n\tfor parent_district in data[data.okrug == okrug].district:\n\t\tfor district in data[data.okrug == okrug].district:\n\t\t\troutes = len(get_common_elements(data[data.district == parent_district].routes,\n\t\t\t\t\t\t data[data.district == district].routes))\n\t\t\tlocal_matrix.append(routes)\n\t\tcommon_matrix.append({'okrug': okrug, 'district': parent_district,\n\t\t\t\t\t\t\t 'routes': local_matrix})\n\t\t\t\t\t\t \t # 'metro': data[data.district == parent_district].metro.values[0]})\n\t\tlocal_matrix = []\n\ncommon_matrix = pd.DataFrame(common_matrix, columns=['okrug', 'district', 'routes'])\ncommon_matrix.to_json('../data/matrix.json',orient='records',force_ascii=False)\n\n\nuniq_okrugs = ([u\"СВАО\", u\"ВАО\", u\"ЦАО\", u\"ЮВАО\", u\"ЮАО\", u\"ЮЗАО\", u\"ЗАО\", u\"СЗАО\", u\"САО\",\n\t\t\t (u\"\",0),(u\"\",0),(u\"\",0),(u\"\",0),(u\"\",0),(u\"\",0),(u\"\",0),(u\"\",0)])\ncommon_matrix_okr = []\ndata_okr = []\nfor okrug in uniq_okrugs:\n\troutes = ({'okrug': okrug, 'routes':get_unique_elements(df[df.OKRUG == okrug].ROUTES)})\n\tdata_okr.append(routes)\ndata_okr = pd.DataFrame(data_okr, columns=['okrug', 'routes'])\n\nlocal_matrix = []\nfor parent_okrug in data_okr.okrug:\n\tfor okrug in data_okr.okrug:\n\t\troutes = len(get_common_elements(data_okr[data_okr.okrug == parent_okrug].routes,\n\t\t\t\t\t\t data_okr[data_okr.okrug == okrug].routes))\n\t\tlocal_matrix.append(routes)\n\tcommon_matrix_okr.append({'okrug': parent_okrug, 'routes': local_matrix})\n\tlocal_matrix = []\n\ncommon_matrix_okr = pd.DataFrame(common_matrix_okr, columns=['okrug', 'routes'])\ncommon_matrix_okr.to_json('../data/okrugs.json',orient='records',force_ascii=False)","repo_name":"ssuprunenko/ssuprunenko.github.io","sub_path":"deptrans/scripts/ngpt-stations.py","file_name":"ngpt-stations.py","file_ext":"py","file_size_in_byte":7664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"71765854477","text":"# -*- coding: utf-8 -*-\nimport mido\nimport numpy as np\nfrom pathlib import Path\nimport pandas as pd\n\ndef getPieces():\n # Set the path\n path = Path('data')\n # Return all midis in the path\n return list(path.glob('*.mid'))\n\ndef readPieces():\n # Array of tuples to create a dataframe later\n frames = []\n # Initiate time\n time = 0\n # Get pieces and iterate over them\n for piece in getPieces():\n # Get the dataframe and length of the piece \n df_temp, time = readNotes(piece, time)\n # Append the dataframe to frames\n frames.append(df_temp)\n\n df = pd.concat(frames)\n\n df.to_csv('music.csv', index=False)\n\n return df\n\ndef isAnyHandOrPedal(trackName):\n if trackName == 'Piano right' or trackName == 'Piano left' or 
trackName == 'Pedal':\n        return True\n    else:\n        return False\n\n\ndef getHandNumber(trackName):\n    if trackName == 'Piano right':\n        return 1\n    elif trackName == 'Piano left' or trackName == 'Pedal':\n        return 0\n    else:\n        print('ERROR. No hand returned: return 0 for \"Piano left\" or \"Pedal\" and 1 for \"Piano right\"')\n\n\ndef loadPieces(force=False) -> pd.DataFrame:\n    if force:\n        return readPieces()\n\n    return pd.read_csv('music.csv')\n\n\ndef readNotes(piece, time=0):\n    # Array of tuples to create a dataframe later\n    data = []\n    # Array of all piano notes to hold the times of their activation for the left hand\n    onLeft = np.zeros(128, dtype=int)\n    # Array of all piano notes to hold the times of their activation for the right hand\n    onRight = np.zeros(128, dtype=int)\n    # Read the file\n    midi = mido.MidiFile(piece)\n    # Time will have to be reordered for different hand notes\n    notesStarted = False\n    # Initiate right and left hand side times\n    rightMaxTime = 0\n    leftMaxTime = 0\n    # Initiate piece length\n    pieceLength = 0\n    # Set start time\n    startTime = time\n\n    # Get the tracks and iterate over them\n    for track in midi.tracks:\n        # Check if the notes belong to any hand\n        if (isAnyHandOrPedal(track.name)):\n            # Iterate over the messages in the track\n            for msg in track:\n                # Add the time of messages to the overall time\n                # Check if notes have started\n                if notesStarted: \n                    time = time + msg.time\n                # Get the hand number (0 - left, 1 - right)\n                hand = getHandNumber(track.name)\n                # Check if it is a note message\n                if msg.type == 'note_on':\n                    # Check if notes have started\n                    if not notesStarted: \n                        # Start the time from beginning in this case\n                        time = startTime + msg.time\n                        # Set that notes started\n                        notesStarted = True\n                    # Check if it was pressed \n                    if msg.velocity > 0:\n                        # Workaround to get truthy value for a note that was pressed at time 0\n                        if time == 0:\n                            time = -1\n                        # Check which hand (0 - left, 1 - right)\n                        if hand:\n                            # If it was pressed, get the time of this action\n                            onRight[msg.note] = time\n                        else:\n                            onLeft[msg.note] = time\n                        # Bring time back to zero if workaround\n                        if time == -1:\n                            time = 0\n                    # Check if it was released\n                    elif msg.velocity == 0:\n                        # Initiate start\n                        start = 0\n                        if hand:\n                            # Just a safety check to ensure it was pressed before\n                            if not onRight[msg.note]:\n                                continue\n                            # Set start \n                            start = onRight[msg.note]\n                            # Unpress the note\n                            onRight[msg.note] = 0\n                        else:\n                            if not onLeft[msg.note]:\n                                continue\n                            start = onLeft[msg.note]\n                            onLeft[msg.note] = 0\n\n                        # If workaround, get start to 0\n                        if start == -1:\n                            start = 0\n\n                        # Update the piece length\n                        if time > pieceLength:\n                            pieceLength = time\n\n                        # Get the length of the note\n                        length = time - start\n                        # Append a note\n                        data.append((msg.note, start, time, length, hand))\n\n        # After the end of the track, set notesStarted to false \n        notesStarted = False\n\n    return pd.DataFrame(data, columns=['pitch', 'on', 'off', 'length', 'hand']), pieceLength\n\n\ndef toStateMatrix(df, minp=0, maxp=127, quant=60) -> np.ndarray:\n    min_pitch = df.pitch.min()\n    # guard: every pitch in the data must lie at or above the matrix's lower bound\n    if min_pitch < minp:\n        raise Exception(f'Minimum pitch in the data is lower than the bounds! 
Minimum is {min_pitch} while bounds are {minp}')\n    length = df['off'].values[-1]\n    steps = int(length / quant)\n    bounds = maxp + 1 - minp\n    stateMatrix = np.zeros((bounds, steps), dtype=int)\n    for row in df.itertuples():\n        pitch = row[1] - minp\n        on = row[2]\n        off = row[3]\n        hand = row[5]\n        if hand == 1:\n            stateMatrix[pitch, int(on / quant):int(off / quant + 1)] = 1\n        elif hand == 0:\n            stateMatrix[pitch, int(on / quant):int(off / quant + 1)] = -1\n    return stateMatrix\n\ndef state2Tuples(stateMatrix, minp, quant=60):\n    data = []\n    heights = np.shape(stateMatrix)[0]\n    notes = np.zeros(heights, dtype=int)\n\n    lengths = np.shape(stateMatrix)[1]\n    for step in range(lengths):\n        for note, state in enumerate(stateMatrix[:, step]):\n            if step != 0:\n                if state == 0:\n                    if stateMatrix[note, step - 1]:\n                        notes[note] = 0\n                        time = (step - 1) * quant\n                        data.append((147, note + minp, 0, time))\n            if state != 0:\n                if notes[note] == 0:\n                    notes[note] = 1\n                    time = step * quant\n                    data.append((147, note + minp, 70, time))\n\n    sortedData = sorted(data, key=lambda x: x[-1])\n    return sortedData\n\ndef tuples2Midi(messages, filename='Midi.mid'):\n    print('Initialising MIDI file {}'.format(filename))\n    song = mido.MidiFile()\n    track = mido.MidiTrack()\n    song.tracks.append(track)\n    time = 0\n    for message in messages:\n        message = np.asarray(message)\n        now = message[-1]\n        delta = now - time\n        time = now\n        message[-1] = delta\n        msg = mido.Message.from_bytes(message[:3])\n        msg.time = delta\n        track.append(msg)\n    song.save(filename)\n\ndef quantizeOn(tick, quant=60):\n    res = tick % quant\n    if res != 0:\n        if res < (quant / 2):\n            tick -= res\n        if res >= (quant / 2):\n            tick += (quant - res)\n    return tick\n\ndef quantizeOff(tickOn, tick, quant=60):\n    res = tick % quant\n    if res != 0:\n        if res < (quant / 2):\n            tick -= res\n        if res >= (quant / 2):\n            tick += (quant - res)\n    if tick == tickOn:\n        tick += quant\n    return tick\n\ndef findLength(on, off):\n    return off - on\n\ndef quantizeDf(df, quant=60):\n    df['on'] = df.apply(lambda x: quantizeOn(x['on'], quant), axis=1)\n    df['off'] = df.apply(lambda x: quantizeOff(x['on'], x['off'], quant), axis=1)\n    df['length'] = df.apply(lambda x: findLength(x['on'], x['off']), axis=1)\n","repo_name":"AzisK/Experiments-in-Music-Creation","sub_path":"midiUtils.py","file_name":"midiUtils.py","file_ext":"py","file_size_in_byte":6825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"10960841337","text":"# #1071 Sum of Consecutive Odd Numbers I by neilor Tonin\n\nstart = int(input())\nend = int(input())\nsumm = 0\n\nif start < end:\n    for i in range(start+1, end, 1):\n        if i % 2 != 0:\n            summ += i\nelif start > end:\n    for i in range(end+1, start, 1):\n        if i % 2 != 0:\n            summ += i\n\nprint(summ)\n ","repo_name":"iagorrr/Competitive-Programming-Algorithms","sub_path":"submissions/Becrowd/_1071(2).py","file_name":"_1071(2).py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"29"} +{"seq_id":"69952501520","text":"from celery import Celery\nfrom celery import shared_task\nfrom celery.schedules import crontab\nfrom django.db import transaction\n\nimport json\n\nfrom user import models as user_models\nfrom talentalps.celery import app\n\n@app.task\ndef update_company_image_order(new_order, company_pk):\n    company_images = user_models.CompanyImage.objects.filter(company__pk=company_pk).only('order').order_by('order')\n    order = json.loads(new_order)\n    try:\n        with 
transaction.atomic():\n for i in range(len(company_images)):\n pos = int(order['order'][i]) - 1\n if company_images[pos].order != i+1:\n company_images[pos].order = i+1\n company_images[pos].save()\n except:\n return False\n return True\n\n@app.task\ndef delete_company_image(image_pk, company_pk):\n image = user_models.CompanyImage.objects.get(pk=image_pk, company__pk=company_pk)\n try:\n image.delete()\n image_set = user_models.CompanyImage.objects.filter(company__pk=company_pk)\n for i in range(len(image_set)):\n image_set[i].order = i + 1\n image_set[i].save()\n\n except:\n return False\n return True","repo_name":"calumlim/talentalps","sub_path":"employer/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"15429794993","text":"import pandas as pd\nfrom scipy import sparse\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nratings = pd.read_csv('ratings.csv')\nmovies = pd.read_csv('movies.csv')\nratings = pd.merge(movies,ratings).drop(['genres','timestamp'],axis =1)\n\nuserRatings = ratings.pivot_table(index=['userId'],columns=['title'],values = 'rating')\nuserRatings = userRatings.dropna(thresh=10, axis=1).fillna(0,axis=1)\n\ncorrMatrix = userRatings.corr(method='pearson')\n\ndef get_similar(movie_name,rating):\n similar_ratings = corrMatrix[movie_name]*(rating-2.5)\n similar_ratings = similar_ratings.sort_values(ascending=False)\n return similar_ratings\n\nmovies = [('17 Again (2009)',3),('Amazing Spider-Man, The (2012)',4),('Iron Man (2008)',5),('Avengers: Age of Ultron (2015)',4),('Neighbors (2014)',4)]\nsimilar_movies =pd.DataFrame()\nfor movie,rating in movies:\n similar_movies = similar_movies.append(get_similar(movie,rating),ignore_index=True)\n\nsimilar_movies.sum().sort_values(ascending=False).head(20)\n\n\n","repo_name":"sanket2221/Recommender_system","sub_path":"collaborative_filter.py","file_name":"collaborative_filter.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"29916168512","text":"import ServoController\nimport LaserController\nimport tkinter\n\n#Get Screen Info\n#root = tkinter.Tk()\n#SCREEN_WIDTH = root.winfo_screenwidth()\n#SCREEN_HEIGHT = root.winfo_screenheight()\n\n#Assign variables for GPIO pin of peripheral \nxAxisPin = 7\nyAxisPin = 5\nlaserPin = 12\n\n#Create objects for hardware interface\nxAxis = ServoController.Servo(xAxisPin)\nyAxis = ServoController.Servo(yAxisPin)\nlaser = LaserController.Laser(laserPin)\n##mouse = MouseController.Mouse()\n\ndef printStatus():\n print(\"X: {0:.2f} Y: {1:.2f}\".format(xAxis.dutyCycle, yAxis.dutyCycle))\n #print(\"X:\", xAxis.dutyCycle, \"Y:\", yAxis.dutyCycle)\n laser.printStatus()\n\ndef printBETTERPRINTMETHOD():\n#Put code inside of me\n return\n\ndef moveLaser(x, y):\n xAxis.setDutyCycle(x)\n yAxis.setDutyCycle(y)\n \ndef end():\n xAxis.end()\n yAxis.end()\n laser.end()\n","repo_name":"KenGrossman/ServoCatLaser","sub_path":"DeviceManager.py","file_name":"DeviceManager.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"29377587396","text":"import random\nimport gym\nimport numpy as np\nfrom collections import deque\nimport torch\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\nfrom torch.optim import Adam\nimport argparse\n\n# 
First, define the experiment hyperparameters\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Parameters setting of DQN')\n\n    parser.add_argument('--batch_size', type=int, default=64, help='batch size')\n    parser.add_argument('--lr', type=float, default=2e-3, help='Learning rate for the net.')\n    parser.add_argument('--num_episodes', type=int, default=500, help='the num of train epochs')\n    parser.add_argument('--seed', type=int, default=0, help='Random seed.')\n\n    parser.add_argument('--gamma', type=float, default=0.98, help='the discount rate')\n    parser.add_argument('--epsilon', type=float, default=0.01, help='the epsilon rate')\n\n    parser.add_argument('--target_update', type=float, default=10, help='the frequency of the target net')\n    parser.add_argument('--buffer_size', type=float, default=10000, help='the size of the buffer')\n    parser.add_argument('--minimal_size', type=float, default=500, help='the minimal size of the learning')\n\n    parser.add_argument('--env_name', type=str, default=\"CartPole-v0\", help='the name of the environment')\n\n    args = parser.parse_args()\n    return args\n\n# Network architecture\nclass Qnet(nn.Module):\n    def __init__(self, state_dim, hidden_dim, action_dim):\n        super(Qnet, self).__init__()\n        self.layer = nn.Sequential(\n            nn.Linear(state_dim, hidden_dim),\n            nn.ReLU(),\n            nn.Linear(hidden_dim, action_dim)\n        )\n\n    def forward(self, s):\n        s = self.layer(s)\n        return s\n\nclass DQN:\n    def __init__(self, args):\n        self.args = args\n        self.hidden_dim = 128  # hidden layer dimension of the Q network\n        self.batch_size = args.batch_size\n        self.minimum_size = args.minimal_size  # learning starts once the replay buffer holds this many transitions\n        self.lr = args.lr  # learning rate\n        self.gamma = args.gamma  # discount factor\n        self.epsilon = args.epsilon  # epsilon-greedy policy\n        self.target_update = args.target_update  # target network update frequency\n        self.count = 0  # counter of updates; indicates when the target network should be refreshed\n        self.num_episodes = args.num_episodes  # number of episodes to collect\n\n        self.capacity = args.buffer_size  # capacity of the replay buffer\n        self.buffer = deque(maxlen=self.capacity)  # initialize the replay buffer (a deque)\n\n        self.env = gym.make(args.env_name)\n\n        random.seed(args.seed)\n        np.random.seed(args.seed)\n        self.env.seed(args.seed)\n        torch.manual_seed(args.seed)\n\n        self.state_dim = self.env.observation_space.shape[0]\n        self.action_dim = self.env.action_space.n\n\n        self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n        self.q_net = Qnet(self.state_dim, self.hidden_dim, self.action_dim).to(self.device)\n        self.target_q_net = Qnet(self.state_dim, self.hidden_dim, self.action_dim).to(self.device)\n\n        self.optimizer = Adam(self.q_net.parameters(), lr=self.lr)\n\n    def take_action(self, state):\n        if np.random.random() < self.epsilon:\n            action = np.random.randint(self.action_dim)\n        else:\n            state = torch.tensor([state], dtype=torch.float).to(self.device)\n            action = self.q_net(state).argmax().item()\n        return action\n\n    def store_transition(self,state, action, reward, next_state, done):\n        self.buffer.append((state, action, reward, next_state, done))\n\n    def update(self):\n        if self.count % self.target_update == 0:\n            self.target_q_net.load_state_dict(self.q_net.state_dict())  # update the target network\n\n        self.count += 1\n\n        # sample a random batch of transitions from the replay buffer\n        transitions = random.sample(self.buffer, self.batch_size)\n        state, action, reward, next_state, done = zip(*transitions)\n\n        states = torch.tensor(np.array(state), dtype=torch.float).to(self.device)\n        actions = torch.tensor(action).view(-1, 1).to(self.device)\n        rewards = torch.tensor(reward, dtype=torch.float).view(-1, 1).to(self.device)\n        next_states = torch.tensor(np.array(next_state), dtype=torch.float).to(self.device)\n        dones = torch.tensor(done, dtype=torch.float).view(-1, 1).to(self.device)\n\n        q_values = self.q_net(states).gather(1, actions)  # Q value\n        max_next_q_values = self.target_q_net(next_states).max(1)[0].view(-1, 1)  # max Q value of the next state\n        q_targets = rewards + self.gamma * max_next_q_values * (1 - dones)  # TD target\n\n        loss = torch.mean(F.mse_loss(q_values, q_targets))  # mean squared error loss\n        self.optimizer.zero_grad()  # PyTorch accumulates gradients by default, so zero them explicitly here\n        loss.backward()  # backpropagate to update the parameters\n        self.optimizer.step()\n\nif __name__ == '__main__':\n    args = parse_args()\n    agent = DQN(args)\n    return_list = []\n\n    for episode in range(400):\n        episode_return = 0\n        state = agent.env.reset()\n        while True:\n            action = agent.take_action(state)\n            next_state, reward, done, _ = agent.env.step(action)\n\n            agent.store_transition(state, action, reward, next_state, done)\n\n            state = next_state\n            episode_return += reward\n\n            if len(agent.buffer) > agent.minimum_size:\n                agent.update()\n\n            if done:\n                print('Episode index: ', episode, '| Episode_return: ', episode_return)\n                break\n\n        return_list.append(episode_return)\n\n    episodes_list = list(range(len(return_list)))\n    plt.plot(episodes_list, return_list)\n    plt.xlabel('Episodes')\n    plt.ylabel('Returns')\n    plt.show()\n\n\n\n\n\n\n\n\n\n","repo_name":"ZihaoZhouSCUT/Quick-Start-in-Reinforcement-Learning-Algorithm","sub_path":"Deep-Q-Network/my_dqn.py","file_name":"my_dqn.py","file_ext":"py","file_size_in_byte":6104,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"} +{"seq_id":"33884978464","text":"import concurrent.futures\nfrom dateutil.parser import parse as date_parser\nfrom datetime import datetime, timedelta\nimport json\nimport pytz\nimport requests\nfrom typing import Any, Dict, List, Optional, Tuple\n\nfrom fetch_and_render import get_latest_commit\n\nARCH_VERSIONS = [\"-cpu\", \"-gpu\", \"\"]\nIMAGES_TO_CHECK = [\"ray\", \"ray-ml\"]\nMAX_TIME_FOR_DOCKER_BUILD = timedelta(hours=3)\nPYTHON_VERSIONS = [\"-py37\", \"-py38\", \"-py39\", \"-py310\", \"\"]\n\n\ndef get_most_recent_layer(tag_resp: Dict[str, Any]) -> datetime:\n    \"\"\"\n    Find the time of the most recently created layer of a given image.\n    \"\"\"\n    dates = []\n    for layer in tag_resp[\"history\"]:\n        layer_json = json.loads(layer[\"v1Compatibility\"])\n        dates.append(date_parser(layer_json[\"created\"]).replace(tzinfo=pytz.utc))\n    return max(dates)\n\ndef fetch_manifest_time(image_name: str, tag: str, token: str) -> datetime:\n    \"\"\"\n    Fetches the manifest of the provided `image_name`:`tag` and returns the time\n    it was created.\n    \"\"\"\n    manifest_url = f\"https://registry.hub.docker.com/v2/rayproject/{image_name}/manifests/{tag}\"\n    manifest_resp = requests.get(manifest_url,headers={\"Authorization\": f\"Bearer {token}\"})\n    assert manifest_resp.ok, f\"Status: {manifest_resp.status_code}\\nBody: {manifest_resp.text}\"\n    return get_most_recent_layer(manifest_resp.json())\n    \n\n\ndef check_last_updated_for_repo(image_name: str, tag_prefix=\"nightly\") -> Dict[str, datetime]:\n    \"\"\"\n    Returns a mapping from `image_name`:`tag` to time of creation. 
This looks through\n    ARCH_VERSIONS and PYTHON_VERSIONS to generate all possible tags.\n    \"\"\"\n    token_url = f\"https://auth.docker.io/token?service=registry.docker.io&scope=repository:rayproject/{image_name}:pull\"\n    token_resp = requests.get(token_url)\n    assert token_resp.ok\n    token = token_resp.json()[\"token\"]\n\n    results = {}\n    with concurrent.futures.ThreadPoolExecutor() as executor:\n        for py_version in PYTHON_VERSIONS:\n            for arch in ARCH_VERSIONS:\n                tag = f\"{tag_prefix}{py_version}{arch}\"\n                results[f\"{image_name}/{tag}\"] = executor.submit(fetch_manifest_time, image_name, tag, token)\n    \n    for tag, fut in results.items():\n        results[tag] = fut.result()\n    return results\n\ndef find_commit_of_age(age=timedelta(hours=4)) -> Tuple[str, datetime]:\n    \"\"\"\n    Finds the first commit that was made at least `age` time before now.\n    \"\"\"\n    recent_commits = get_latest_commit()\n    now = datetime.now(tz=pytz.utc)\n    for commit in recent_commits:\n        # GitHub commits use UTC time\n        created_at = datetime.fromtimestamp(commit.unix_time_s, tz=pytz.utc)\n        if (now - age) > created_at:\n            return (commit.sha, created_at)\n\n\n\ndef check_recent_commits_have_docker_build() -> List[str]:\n    # We want to choose a commit that is old enough to have a completed\n    # Docker build. We need to tie this to a commit because Docker images\n    # are only built per commit (e.g. there may be no images built for \n    # 48 hours over a weekend if there are no commits).\n    sha, commit_time = find_commit_of_age(MAX_TIME_FOR_DOCKER_BUILD)\n    sha = sha[:6]\n    all_images = check_last_updated_for_repo(\"ray\")\n    all_images.update(check_last_updated_for_repo(\"ray-ml\"))\n    failed = []\n    for tag, date in all_images.items():\n        if tag.startswith(\"ray-ml/\") and tag.endswith(\"-cpu\"):\n            continue\n        if date < commit_time:\n            failed.append(f\"- `{tag}` did not build for SHA: `{sha}`\")\n    if len(failed) == 0:\n        return []\n    lines = [\n        \"🐳 Your Docker Build Failure Report\", \n    ]\n    lines.extend(failed)\n    return lines\n","repo_name":"ray-project/travis-tracker-v2","sub_path":"docker_checker.py","file_name":"docker_checker.py","file_ext":"py","file_size_in_byte":3668,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"12366565397","text":"import pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.pipeline import Pipeline\n\n\n# Columns with non-numerical values\ncategorical_columns = ['RCONSC', 'SEX', 'RSLEEP', 'RATRIAL', 'RCT', 'RVISINF', 'RHEP24',\n                       'RASP3', 'RDEF1', 'RDEF2', 'RDEF3', 'RDEF4', 'RDEF5', 'RDEF6', 'RDEF7', 'RDEF8', 'STYPE', 'RDATE', 'RXASP', 'RXHEP',\n                       'DASP14', 'DASPLT', 'DLH14', 'DMH14', 'DSCH', 'DCAA', 'DDIAGISC', 'DRSUNK', 'DALIVE',\n                       'FDENNIS', 'FPLACE', 'FAP', 'FOAC', 'COUNTRY', 'CMPLASP', 'CMPLHEP']\n\n\nclass MultiColumnLabelEncoder:\n    def __init__(self, columns=None):\n        self.columns = categorical_columns  # array of column names to encode\n\n    def fit(self, X, y=None):\n        return self  # not relevant here\n\n    def transform(self, X):\n        '''\n        Transforms columns of X specified in self.columns using\n        LabelEncoder(). 
If no columns specified, transforms all\n columns in X.\n '''\n output = X.copy()\n print(self.columns)\n if self.columns is not None:\n for col in self.columns:\n output[col] = LabelEncoder().fit_transform(output[col])\n else:\n for colname, col in output.iteritems():\n output[colname] = LabelEncoder().fit_transform(col)\n return output\n\n def fit_transform(self, X, y=None):\n return self.fit(X, y).transform(X)\n","repo_name":"asimzz/stroke-trial-prediction","sub_path":"multi_column_label_encoder.py","file_name":"multi_column_label_encoder.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"24430962539","text":"import matplotlib\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport time\n\n\ndef visualise_results(experiments, file_name, column_name, title, xlabel, ylabel, skip_first_rows=0, show_bounds=True, bounds_alpha=0.15):\n exp_dfs = []\n cols = dict()\n cmap = matplotlib.cm.get_cmap(\"brg\")\n cmap2 = matplotlib.cm.get_cmap(\"brg\")\n\n for exp in experiments:\n if len(exp) == 2:\n exp_df = pd.read_csv(\"experiments/\" + exp[1] + \"/\" + file_name)\n exp_dfs.append(None)\n cols[exp[0]] = exp_df[column_name]\n else:\n exp_results = pd.DataFrame()\n for e in exp[1:]:\n e_df = pd.read_csv(\"experiments/\" + e + \"/\" + file_name)\n exp_results[e] = e_df[column_name]\n cols[exp[0]] = exp_results[list(exp[1:])].mean(axis=1)\n exp_results[\"lower\"] = exp_results[list(exp[1:])].min(axis=1)\n exp_results[\"upper\"] = exp_results[list(exp[1:])].max(axis=1)\n exp_dfs.append(exp_results)\n\n df = pd.DataFrame(cols).iloc[skip_first_rows:]\n axes = df.plot(colormap=cmap)\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n\n if show_bounds:\n for exp_id, exp in enumerate(experiments):\n exp_results = exp_dfs[exp_id]\n if exp_results is not None:\n axes.fill_between(exp_results.index, exp_results.upper, exp_results.lower, where=exp_results.upper > exp_results.lower, facecolor=cmap2(exp_id / float(len(experiments)-1)), alpha=bounds_alpha, interpolate=True)\n\n plt.savefig(\"figures/figure_\" + str(int(time.time())) + \".png\", dpi=400, bbox_inches='tight')\n plt.show()\n\n\ndef visualise_cumulative_reward(experiments, environment, skip_first_rows=0, show_bounds=True, bounds_alpha=0.15):\n visualise_results(experiments, \"timesteps.csv\", \"cumulative_reward\", environment + \": cumulative reward in time\", \"Time step\", \"Cumulative reward\", skip_first_rows, show_bounds, bounds_alpha)\n\n\ndef visualise_episode_reward(experiments, environment, skip_first_rows=0, show_bounds=True, bounds_alpha=0.15):\n visualise_results(experiments, \"episodes.csv\", \"reward\", environment + \": episode reward\", \"Episode\", \"Reward\", skip_first_rows, show_bounds, bounds_alpha)\n\n\ndef visualise_episode_duration(experiments, environment, skip_first_rows=0, show_bounds=True, bounds_alpha=0.15):\n visualise_results(experiments, \"episodes.csv\", \"duration\", environment + \": episode duration\", \"Episode\", \"Time steps\", skip_first_rows, show_bounds, bounds_alpha)\n","repo_name":"karolkuna/reinforcement-learning","sub_path":"visualisation.py","file_name":"visualisation.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"29"} +{"seq_id":"74255193398","text":"import pytest\nimport cl.enterprise_python.core as ep\nfrom approvaltests import verify\n\n\nclass DataClassTest:\n \"\"\"\n Tests for 
DataClass.\n    \"\"\"\n\n    def test_attribute_name(self):\n        \"\"\"Test the effect of a typo in attribute name.\"\"\"\n\n        # Assign value of attribute with typo in name\n        obj = ep.UnsafeClass()\n\n        # Attribute name has a typo here\n        obj.int_attirbute = 2\n\n        # But not here, so it has the old value\n        assert obj.int_attribute == 1\n\n        # And there is now a second, unwanted attribute with typo in name\n        assert obj.int_attirbute == 2\n\n    def test_equality(self):\n        \"\"\"Test for the built-in equality operator.\"\"\"\n\n        # One expects these two instances to be equal, and they are\n        # without having to manually override the equality operator\n        assert ep.DataClass() == ep.DataClass()\n\n    def test_repr(self):\n        \"\"\"Test how the instance will appear in the debugger.\"\"\"\n\n        obj = ep.DataClass()\n        obj.int_attribute = 1\n        obj.list_attribute = [2, 3]\n        obj_repr = repr(obj)\n        verify(obj_repr)\n\n    def test_list_attribute_initialization(self):\n        \"\"\"\n        Test the effect of initializing a mutable object\n        directly instead of using Factory(type).\n        \"\"\"\n\n        # Create the first class instance and append an element\n        # to list_attribute.\n        obj_1 = ep.DataClass()\n        obj_1.list_attribute.append(1)\n\n        # Create the second class instance that should have\n        # empty list_attribute, and it does because it uses\n        # default_factory= rather than default=.\n        obj_2 = ep.DataClass()\n        assert len(obj_2.list_attribute) == 0\n\n\nif __name__ == \"__main__\":\n    pytest.main([__file__])\n","repo_name":"compatibl/enterprise-python","sub_path":"src/cl/enterprise_python/tests/classes/data_class_test.py","file_name":"data_class_test.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"43541968454","text":"# Do not solve it in up/down/left/right order\n# It must be implemented as right/down/left/up so the snail spiral prints correctly\n\nimport sys\nsys.stdin = open(\"input.txt\", \"r\")\n# right down left up\n\n#\ndr= [0,1,0,-1]\ndc= [1,0,-1,0]\n\nT = int(input())\n\nfor t in range(1,T+1):\n    N=int(input())\n\n    arr=[[0]*N for _ in range(N)]\n\n    d=0 # direction 0: right, 1: down, 2: left, 3: up; each number is an index\n    r=0\n    c=0\n    num=1\n\n    while num <= N*N:\n        arr[r][c] =num # store the value in the current cell\n        num += 1 # prepare the next number\n\n        # decide the next cell\n        nr = r+dr[d]\n        nc = c+dc[d]\n        # the direction must turn before moving out of bounds\n\n        if 0<=nr 0:\n        if a % 2 == 0:\n            l.insert(0, 0)\n            a = a // 2\n        else:\n            l.insert(0, 1)\n            a = a // 2\n    return l\n\ndef shift(l):\n    d = l.pop(0)\n    l.append(d)\n    return l\n\ndef bin2dec(l):\n    f = 0\n    p = 0\n    for i in range(len(l) - 1, -1, -1):\n        f = f + l[i] * (2 ** p)\n        p += 1\n    return f\n\na = int(input())\nl=dec2bin(a)\nlr = list(l)\nf = 0\nmax = 0\nwhile True:\n    l=shift(l)\n    f = bin2dec(l)  # value of the rotated bit pattern\n    if f > max:\n        max = f\n    if lr == l:\n        break\nprint(max)\n","repo_name":"valerpenko/Misha","sub_path":"Olymp/cycleshiftwithfunc.py","file_name":"cycleshiftwithfunc.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"1059161511","text":"#!/usr/bin/env python\n\n\"\"\"\nMTH TO LAST ELEMENT\nSPONSORING COMPANY:\n\n\n\nCHALLENGE DESCRIPTION:\n\n\nWrite a program which determines the Mth to the last element in a list.\n\nINPUT SAMPLE:\n\nThe first argument is a path to a file. The file contains the series of space delimited characters followed by an\ninteger. The integer represents an index in the list (1-based), one per line.\n\nFor example:\n\na b c d 4\ne f g h 2\nOUTPUT SAMPLE:\n\nPrint to stdout the Mth element from the end of the list, one per line. 
If the index is larger than the number of\nelements in the list, ignore that input.\n\nFor example:\na\ng\n\n\"\"\"\n\nfrom sys import argv\n\nwith open(argv[1], 'r') as f:\n test_cases = f.read().strip().splitlines()\n\ndef get_m_value():\n for line in test_cases:\n series, n = line.split()[:-1], int(line.split()[-1])\n if n <= len(series):\n out = series[0 - n]\n print(out)\n\n\n\nif __name__ == '__main__':\n get_m_value()","repo_name":"marshallhumble/Coding_Challenges","sub_path":"Code_Eval/Moderate/MthToLastValue/MthToLastValue.py3","file_name":"MthToLastValue.py3","file_ext":"py3","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"5257501091","text":"#Author: Suryansh Kumar, Australian National University\n#Example to linearly model 2D dataset (x(input), y(output)).\n\n#import the necessary library\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#set the input data\nx_data = [1.0, 2.0, 3.0]\ny_data = [2.0, 4.0, 6.0]\n\n#function to make a prediction\ndef prediction(x, w):\n return x*w\n\n#function to calculate the loss\ndef loss(x, y, w):\n y_predict = prediction(x, w)\n error = (y-y_predict)*(y-y_predict)\n return error\n\n#estimate the error for different values of slope (w) and store it in the list\nw_set = []\ne_set = []\nN = np.size(x_data)\n\n#loop through the different values of w's\nfor w in np.arange(-1.0, 5, 0.1):\n mse_iter = 0.0\n for x, y in zip(x_data, y_data):\n mse_iter = mse_iter + loss(x, y, w)\n e_set.append(mse_iter/N)\n w_set.append(w)\n\n#plot the error curve.\nplt.plot(w_set, e_set, 'r-')\nplt.xlabel('w (slope of the line)')\nplt.ylabel('mean square error')\nplt.title('Mean Squared Error Curve')\nplt.grid()\nplt.show()\n","repo_name":"suryanshkumar/PythonTutorial","sub_path":"linear_model/linear_model_example.py","file_name":"linear_model_example.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"22769325770","text":"import time\n\nimport pytest\nimport pytest_check as check\n\nfrom pom.fieldreestr_tests_funcs import FieldsReestrFuncs\n\n\n@pytest.mark.usefixtures('setup')\nclass Tests:\n\n def test_reestr_main_func(self):\n self.driver.delete_all_cookies()\n reestr_funcs = FieldsReestrFuncs(self.driver)\n assert reestr_funcs.login() == 0, 'Не удалось загрузить сраницу'\n assert reestr_funcs.reestr_page() != 1, \"Не удалось открыть реестр\"\n check.equal(reestr_funcs.fields_amount_check(), 0, \"Ошибка!\")\n check.not_equal(reestr_funcs.find_button(), 1, \"Не удалось нажать на кнопку Найти\")\n check.not_equal(reestr_funcs.clear_button(), 1, \"Не удалось нажать на кнопку Очистить\")\n check.not_equal(reestr_funcs.check_box_all(), 1, \"Не удалось нажать на кнопку мультичекбокс\")\n check.equal(reestr_funcs.check_box_select_all_count(), 0, \"Ошибка!\")\n print(reestr_funcs.custom_fields_amount_check())\n time.sleep(3)\n# print(\"\\n\" + Config().FIELD_REESTR_SEARCHRES_AMOUNT)\n# print(page_nav.get_many_btns(Config().FIELD_REESTR_SEARCHRES_AMOUNT))\n # page_nav.click_login_btn(\"isandsadmin\", \"FRNBuQw*#x*7\")\n# self.driver.get('https://beta.isands.ru/zemel-nyj-pasport')\n# print(database_meth.ConnectionToServer().cadastr_fields_count(\n# {\n# 'date_': '2023',\n# 'isarc': False\n# }\n# ))\n# print(page_nav.get_fields_amount())\n# page_nav.search_btn().click()\n# time.sleep(2)\n# page_nav.id_sub_test()\n# return 
page_nav\n\n\n\n\n","repo_name":"kharlamo/main_test.py","sub_path":"main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"8343665312","text":"#__author__: dongj\n#date: 2018/5/14\n#################################################\n# logRegression: Logistic Regression\n# Author : zouxy\n# Date : 2014-03-02\n# HomePage : http://blog.csdn.net/zouxy09\n# Email : zouxy09@qq.com\n#################################################\n\nimport numpy as np\nimport time\n# load the data\ndef loadData():\n train_x = []\n train_y = []\n fileIn = open('testSet.txt')\n for line in fileIn.readlines():\n lineArr = line.strip().split()\n train_x.append([1.0, float(lineArr[0]), float(lineArr[1])])\n train_y.append(float(lineArr[2]))\n return np.mat(train_x), np.mat(train_y).transpose()\n\n# calculate the sigmoid function\ndef sigmoid(inX):\n return 1.0 / (1 + np.exp(-inX))\n\n\n# train a logistic regression model using an optional optimization algorithm\n# input: train_x is a mat datatype, each row stands for one sample\n# train_y is mat datatype too, each row is the corresponding label\n# opts is the optimization options, including step size and maximum number of iterations\ndef trainLogRegres(train_x, train_y, opts):\n # calculate training time\n startTime = time.time()\n\n numSamples, numFeatures = np.shape(train_x)\n alpha = opts['alpha']\n maxIter = opts['maxIter']\n weights = np.ones((numFeatures, 1))\n\n # optimize through gradient descent algorithm\n for k in range(maxIter):\n if opts['optimizeType'] == 'gradDescent': # gradient descent algorithm\n output = sigmoid(train_x * weights)\n error = train_y - output\n weights = weights + alpha * train_x.transpose() * error\n elif opts['optimizeType'] == 'stocGradDescent': # stochastic gradient descent\n for i in range(numSamples):\n output = sigmoid(train_x[i, :] * weights)\n error = train_y[i, 0] - output\n weights = weights + alpha * train_x[i, :].transpose() * error\n elif opts['optimizeType'] == 'smoothStocGradDescent': # smooth stochastic gradient descent\n # randomly select samples to optimize for reducing cycle fluctuations\n dataIndex = list(range(numSamples))\n for i in range(numSamples):\n alpha = 4.0 / (1.0 + k + i) + 0.01\n randIndex = int(np.random.uniform(0, len(dataIndex)))\n sampleIndex = dataIndex[randIndex]\n output = sigmoid(train_x[sampleIndex, :] * weights)\n error = train_y[sampleIndex, 0] - output\n weights = weights + alpha * train_x[sampleIndex, :].transpose() * error\n del dataIndex[randIndex] # during one iteration, delete the optimized sample\n else:\n raise NameError('Not support optimize method type!')\n\n print('Congratulations, training complete! Took %fs!' 
% (time.time() - startTime))\n return weights\n\n\n# test your trained Logistic Regression model given test set\ndef testLogRegres(weights, test_x, test_y):\n numSamples, numFeatures = np.shape(test_x)\n matchCount = 0\n for i in range(numSamples):\n predict = sigmoid(test_x[i, :] * weights)[0, 0] > 0.5\n if predict == bool(test_y[i, 0]):\n matchCount += 1\n accuracy = float(matchCount) / numSamples\n return accuracy\n\n\n\n\n\n\n\n\n","repo_name":"dongjiu0815/MachineLearningPractice","sub_path":"线性回归/Logistic回归/逻辑回归方法综合版本/逻辑回归.py","file_name":"逻辑回归.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"4"} +{"seq_id":"42857696919","text":"import asyncio\nimport random\n\nimport discord\nfrom discord.ext import commands\n\nbot = commands.Bot(command_prefix='k!')\n\nbotcolor = 0x000088\n\nbot.remove_command('help')\n\n########################################################################################################################\nextensions = ['cmds.alles', 'cmds.help', 'cmds.gmgn', 'cmds.fun', 'cmds.mod', 'cmds.tapply', 'cmds.error', 'cmds.emojis']\n\n\n########################################################################################################################\n@bot.event\nasync def on_ready():\n print('--------------------------------------')\n print('Bot ist bereit.')\n print('Eingeloggt als')\n print(bot.user.name)\n print(bot.user.id)\n print('--------------------------------------')\n bot.loop.create_task(status_task())\n\n\n########################################################################################################################\nasync def status_task():\n while True:\n user = sum(len(s.members) for s in bot.guilds)\n await bot.change_presence(activity=discord.Game('with the community'), status=discord.Status.online)\n await asyncio.sleep(15)\n\n await bot.change_presence(activity=discord.Game('mit {0} Usern'.format(user)), status=discord.Status.online)\n await asyncio.sleep(15)\n\n await bot.change_presence(activity=discord.Game('k!help | Help'), status=discord.Status.online)\n await asyncio.sleep(15)\n\n await bot.change_presence(activity=discord.Activity(name='der Kaffeehaus Community zu'.format(str(len)),\n type=discord.ActivityType.watching))\n await asyncio.sleep(15)\n\n\n########################################################################################################################\nif __name__ == '__main__':\n for extension in extensions:\n try:\n bot.load_extension(extension)\n except Exception as error:\n print('Ein Fehler ist in {} aufgetreten: [{}]'.format(extension, error))\n########################################################################################################################\n\nbot.run('Njg4ODcyODIwNjkxODI4NzUx.XpbyCw.mBTeP2CY5UFj5wvVtGCIQh1kc9c')\n","repo_name":"OriCat101/Kaffemaschiene","sub_path":"Kaffeemaschine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"3377164189","text":"\"\"\"\nmultilprocessing\n\npid - process id\nppid - parent process id\ngetpid()\ngetppid()\n\n\"\"\"\nimport os\nimport time\nfrom multiprocessing import Process\n\n\ndef foo(fname):\n # print(f'[subprocess PID-{os.getpid()}]')\n # print(f'[subprocess PID-{os.getpid()}]running sub-process {fname}')\n print(f'[subprocess PID-{os.getpid()}]parent process id PPID {os.getppid()}')\n return fname;\n\n\nif __name__ == 
\"__main__\":\n print(f'[main PID-{os.getpid()}]main process started')\n print(f'[main PID-{os.getpid()}]parent process id PPID {os.getppid()}')\n\n NUM_OF_PROCESS = 3\n for i in range(NUM_OF_PROCESS):\n proc1 = Process(target=foo, args=('sproc_'+str(i),))\n proc1.start()\n # time.sleep(1)\n\n print(f'[main PID-{os.getpid()}]running main process')\n","repo_name":"edu-athensoft/ceit4101python","sub_path":"stem1500_modules/module_50_process/process_4.py","file_name":"process_4.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"35172011844","text":"import json\nimport logging\nimport math\nimport os\nimport random\nfrom copy import deepcopy\nfrom typing import Dict, Iterable, List, Tuple, Optional\n\nimport numpy as np\nimport torch\nfrom allennlp.common import Tqdm\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.common.util import lazy_groups_of\nfrom allennlp.data.fields import ListField, MetadataField\nfrom allennlp.data.instance import Instance\nfrom allennlp.data.samplers import BatchSampler\nfrom opendebias.training.samplers.bias_aware_batch_sampler_base import \\\n BiasAwareBatchSamplerBase\nfrom overrides import overrides\nfrom torch.utils import data\n\nlogger = logging.getLogger(__name__)\n\n@BatchSampler.register(\"bias-free\")\nclass BiasFreeSampler(BiasAwareBatchSamplerBase):\n def __init__(\n self,\n data_source: data.Dataset,\n batch_size: int,\n K: int = None,\n K_pos: int = None,\n K_neg: int = None,\n stratified_sample: bool = True,\n bias_prediction_file = None,\n label_namespace: str = \"labels\",\n padding_noise: Optional[float] = 0.1,\n sorting_keys: List[str] = None\n ):\n super().__init__(data_source, batch_size, None, K, K_pos, K_neg, stratified_sample, padding_noise, sorting_keys, label_namespace)\n\n\n @overrides\n def group_instances(self, instances, bias_logits):\n label_count = self.vocab.get_vocab_size(self._label_namespace)\n bias = torch.from_numpy(np.zeros((len(instances), label_count))).float()\n return super().group_instances(\n instances,\n {instances[0]['metadata']['dataset_name']:bias}\n )\n\n @overrides\n def sample(self, target_group, target_y, num, cur_idx = None):\n target_instance_idxes = self.groupped_instances[target_group][target_y]\n res_idxes = []\n while len(res_idxes) < num:\n sample_idx = random.sample(target_instance_idxes, k=1)[0]\n if sample_idx == cur_idx:\n continue\n res_idxes.append(sample_idx)\n return res_idxes\n\n @overrides\n def sample_instances(self, instance_idx):\n instance = self.instances[instance_idx]\n group_idx = self.instance_group[instance_idx]\n assert group_idx == 0\n y = instance['label']._label_id\n\n # sample pos\n pos_idxes, neg_idxes = [], []\n pos_count = self.K_pos if self.K_pos is not None else self.K\n neg_count = self.K_neg if self.K_neg is not None else self.K\n\n if self.stratified_sample:\n for _ in range(pos_count):\n pos_idxes.extend(self.sample(group_idx, y, 1, cur_idx=instance_idx)) # same group, same class\n for _ in range(neg_count):\n neg_idxes.extend(self.sample(group_idx, self.another_class(group_idx, y), 1, cur_idx=instance_idx)) # same group, different class\n else:\n raise NotImplementedError()\n pos_idxes = random.sample(self.cross_groupped_instances[y][group_idx], pos_count)\n neg_idxes = random.sample(self.cross_label_instances[group_idx][y], neg_count)\n\n \n # merge pos and neg into instances\n self.merge_instance('positives', instance, pos_idxes)\n 
self.merge_instance('negatives', instance, neg_idxes)\n return pos_idxes, neg_idxes\n","repo_name":"Beastlyprime/group-invariant-learning","sub_path":"mnli-hans/opendebias/training/samplers/bias_free_batch_sampler.py","file_name":"bias_free_batch_sampler.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"4"} +{"seq_id":"73022698358","text":"import numpy\nimport functools\n\nwith open(\"input.txt\", \"r\") as f:\n trees = numpy.array([[int(t) for t in l.strip()] for l in f.readlines()])\n\n\ndef part1():\n def treevis(trees, vis):\n for r in range(len(trees)):\n m = -1\n for c in range(len(trees[r])):\n if trees[r][c] > m:\n vis[r][c] = 1\n m = trees[r][c]\n\n vis = numpy.zeros(trees.shape)\n\n treevis(trees, vis)\n treevis(numpy.fliplr(trees), numpy.fliplr(vis))\n treevis(numpy.transpose(trees), numpy.transpose(vis))\n treevis(numpy.fliplr(numpy.transpose(trees)), numpy.fliplr(numpy.transpose(vis)))\n\n print(numpy.count_nonzero(vis))\n\n\ndef part2():\n scores = numpy.zeros(trees.shape)\n\n def scenic_score(r, c, trees):\n def score_line(line):\n score = 0\n for t in line[1:]:\n score += 1\n if t >= line[0]:\n break\n return score\n\n scores[r][c] = numpy.multiply.reduce(\n [\n score_line(l)\n for l in (trees[r, c:], numpy.flip(trees[r, : c + 1]), trees[r:, c], numpy.flip(trees[: r + 1, c]))\n ]\n )\n\n for r in range(len(trees)):\n for c in range(len(trees[r])):\n scenic_score(r, c, trees)\n\n print(scores)\n print(\"Max:\", numpy.max(scores))\n\n\n# part1()\npart2()\n","repo_name":"josh2112/AdventOfCode2022","sub_path":"8/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"30095839206","text":"\"\"\"\r\n Author: Furqan Fiaz\r\n Date: 14-08-2022, 4.17 Pm\r\n This file will contain the information about File Input Output in Python\r\n\"\"\"\r\n\"\"\"\r\n\"r\" - Open File for reading - Default mode\r\n\"w\" - Open File for writing\r\n\"x\" - Creates file if not exists\r\n\"a\" - Add more content to a file\r\n\"t\" - text mode - Default mode\r\n\"b\" - binary mode\r\n\"+\" - read and write\r\n\"\"\"\r\n\r\n#open(\"F_15_File.txt\") # It will not return any value for printing\r\n# f=open(\"F_15_File.txt\") # f is the file pointer\r\n# content=f.read()\r\n# print(content)\r\n# f.close()\r\n\r\n# Open in mode\r\n# f=open(\"F_15_File.txt\",\"rb\") # it will read in binary\r\n# content=f.read()\r\n# print(content)\r\n# f.close()\r\n\r\n# ReadLine Function\r\nf=open(\"F_15_File.txt\")\r\n# for line in f:\r\n# print(line)\r\n# To print without line break\r\nfor line in f:\r\n print(line, end=\"\")\r\n","repo_name":"Furqaann/Python-Course","sub_path":"F_15_File IO.py","file_name":"F_15_File IO.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"15086214933","text":"import dense_correspondence_manipulation.utils.utils as pdc_utils\npdc_utils.add_dense_correspondence_to_python_path()\nfrom dense_correspondence.network.dense_correspondence_network import DenseCorrespondenceNetwork\nfrom dense_correspondence.dataset.dynamic_spartan_dataset import DynamicSpartanDataset\n\ndef build_spartan_dataset(logs_root_path):\n \"\"\"\n This is just to keep the ancestral dataloader happy.\n The only thing that matters is the logs_root_path, I think. 
It should be an absolute path\n \"\"\"\n mini_config_expanded = dict()\n single_object_scene_config = dict()\n single_object_scene_config[\"train\"] = [\"2019-04-10-22-12-43\"] # Hack - doesn't need to change?\n single_object_scene_config[\"test\"] = [\"2019-04-10-22-12-43\"] # Hack - doesn't need to change?\n single_object_scene_config[\"object_id\"] = \"test_object\"\n single_object_scene_dict = dict()\n single_object_scene_dict[\"new_object\"] = single_object_scene_config\n mini_config_expanded[\"single_object\"] = single_object_scene_dict\n multi_object_scene_config = dict()\n multi_object_scene_config[\"train\"] = []\n multi_object_scene_config[\"test\"] = []\n mini_config_expanded[\"multi_object\"] = multi_object_scene_config\n mini_config_expanded[\"logs_root_path\"] = logs_root_path\n \n return DynamicSpartanDataset(config_expanded=mini_config_expanded)\n\n\ndef get_object_starting_poses(dataset, # ImitationEpisodeDataset or ImitationEpisodeSequenceDataset\n ): # return -> numpy.array shape [N,2] with N = # episodes\n object_poses = []\n for log_name in dataset.episodes.keys():\n episode = dataset.episodes[log_name]\n object_pose_trained = episode.sim_config_dict[\"instances\"][0][\"q0\"]\n object_poses.append([object_pose_trained[0], object_pose_trained[1]])\n\n return object_poses\n","repo_name":"peteflorence/visuomotor_correspondence","sub_path":"dataset/dataset_utils.py","file_name":"dataset_utils.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"4"} +{"seq_id":"38065472679","text":"# AFTER EDITORIAL\n\n#\n# PROP.\n# Subgraph of tree is forest.\n#\n# PROP.\n# For forest,\n# = + \n# since for each component,\n# Vc = Ec + 1.\n#\n# PROP.\n# result = \\sum_{i <= j} num_components(i, j)\n# = \\sum num_verts(i, j) - num_edges(i, j)\n# = (\\sum num_verts(i, j)) - (\\sum num_edges(i, j))\n#\n# \\sum num_verts(i, j) = 1 + .. + (n-1) + n +\n# 1 + .. + (n-1) +\n# ... 
+\n# 1\n# = (tetrahedral number) = binom(n + 2, 3)\n#\n# \\sum num_edges(i, j) = \\sum_e |{(i, j) | e in [i, j]}|\n# = \\sum_e (i0 * (n + 1 - j0)) (where e = (i0, j0))\n#\n\n\ndef solve(ls, debug=0):\n n = len(ls) + 1\n term1 = ((n + 2) * (n + 1) * n) // 6 # 1st term in above PROP\n term2 = 0 # 2nd term in above PROP\n for i, j in ls:\n if i > j:\n i, j = j, i\n term2 += i * (n + 1 - j)\n\n return term1 - term2\n\n\ndef main(istr, ostr):\n n = int(istr.readline())\n ls = []\n for i in range(n - 1):\n v1, v2 = list(map(int, istr.readline().strip().split()))\n ls.append([v1, v2])\n result = solve(ls)\n print(result, file=ostr)\n\n\nif __name__ == \"__main__\":\n import sys\n\n main(sys.stdin, sys.stdout)\n\n\nimport unittest\nimport io, sys\n\n\nclass Test(unittest.TestCase):\n def test_0(self):\n pass\n\n def test_1(self):\n inp = \"\"\"\\\n3\n1 3\n2 3\n\"\"\"\n main(io.StringIO(inp), sys.stdout)\n\n def test_2(self):\n inp = \"\"\"\\\n10\n5 3\n5 7\n8 9\n1 9\n9 10\n8 4\n7 4\n6 10\n7 2\n\"\"\"\n main(io.StringIO(inp), sys.stdout)\n","repo_name":"hi-ogawa/practice","sub_path":"atcoder/abc173_f/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} +{"seq_id":"7949655632","text":"import random # импорт модуля random\n\nmessages_count = random.randint(1, 5)\nUU = random.randint(1, 4)\nRR = random.randint(5, 30)\nTT = random.randint(5, 30)\nif messages_count == 1:\n print(f'Сделай {UU} подтягиваний ')\nelif messages_count == 2:\n print(f'Сделай {RR} упражений на пресс ')\nelif messages_count == 3 :\n print(f'Сделай {TT} отжиманий ')\nelse:\n print('Сегодня отдыхай')\n","repo_name":"Denis-Davydov-get/python","sub_path":"Daniil/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"25917801303","text":"#!/usr/bin/python3\n\"\"\" art union 1300 \"\"\"\n\nfrom unittest import TestCase\nfrom typing import NamedTuple, List\n\n\nclass Matrix():\n \"\"\" matrix class \"\"\"\n\n rows = 0\n cols = 0\n eles = []\n\n def __init__(self, rows, cols, eles) -> None:\n \"\"\" constructor for matrix \"\"\"\n self.rows = rows\n self.cols = cols\n self.eles = eles\n\n def process(self) -> None:\n \"\"\" add the element above or to the left\n whichever is larger \"\"\"\n\n for indr in range(1, self.rows):\n self.eles[indr][0] += self.eles[indr-1][0]\n\n for indc in range(1, self.cols):\n self.eles[0][indc] += self.eles[0][indc-1]\n\n for indr in range(1, self.rows):\n for indc in range(1, self.cols):\n self.eles[indr][indc] += max(\n [\n self.eles[indr-1][indc],\n self.eles[indr][indc-1]\n ]\n )\n\n def last(self) -> List[int]:\n \"\"\" return the last column \"\"\"\n\n return [self.eles[indr][-1] for indr in range(self.rows)]\n\n\nclass Args(NamedTuple):\n \"\"\" arguments \"\"\"\n\n mat: Matrix\n\n\ndef get_args() -> Args:\n \"\"\" get the input arguments \"\"\"\n\n rows, cols = list(map(int, input().split()))\n eles = []\n for _ in range(rows):\n eles.append(list(map(int, input().split())))\n return Args(mat=Matrix(rows, cols, eles))\n\n\nclass Test(TestCase):\n \"\"\" test cases \"\"\"\n\n def test_process(self):\n \"\"\" test matrix method adding the element above or\n to the left whichever is larger \"\"\"\n\n test = Matrix(5, 1, [[1], [2], [3], [4], [1]])\n test.process()\n self.assertEqual(test.eles, [[1], [3], [6], [10], [11]])\n\n def test_last(self):\n \"\"\" test that the 
last column is returned \"\"\"\n\n test = Matrix(5, 1, [[1], [2], [3], [4], [1]])\n self.assertEqual(test.last(), [1, 2, 3, 4, 1])\n \n def test_answer(self):\n \"\"\" test answer \"\"\"\n\n test = Matrix(5, 1, [[1], [2], [3], [4], [5]])\n self.assertEqual(answer(test), [1, 3, 6, 10, 15])\n test = Matrix(4, 2, [[2, 5], [3, 1], [5, 3], [10, 1]])\n self.assertEqual(answer(test), [7, 8, 13, 21])\n\n\ndef print_list(lis: List[int]) -> None:\n \"\"\" print a list aesthetically \"\"\"\n\n for ele in lis:\n print(ele, end=' ')\n print()\n\n\ndef answer(mat: Matrix):\n \"\"\" get answer given the arguments \"\"\"\n\n mat.process()\n return mat.last()\n\n\ndef main() -> None:\n \"\"\" do stuff \"\"\"\n\n args = get_args()\n mat = args.mat\n print_list(answer(mat))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ahoque1999/competitive_programming","sub_path":"art_union.py","file_name":"art_union.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"29453795457","text":"# flask app\n\nfrom flask import Flask, render_template\nfrom datetime import datetime\nfrom bokeh.embed import server_session # use server_session instead of autoload_server\nfrom bokeh.client import pull_session\n\n\"\"\"\nserver_session is a function that returns the source code for bokeh scripts\npull_session enables bokeh to be pulled from flask\nTo allow bokeh with flask, you need to run the command for bokeh serve like below to include flask local port so as to\nallow flask display it on the web\n\"bokeh serve --allow-websocket-origin=127.0.0.1:5000 random_generator.py\"\n OR\n\"bokeh serve random_generator.py --host=\"*\"\n\n\n1. python manage.py startapp bokehapp\n2. Change 'DIRS': [] to 'DIRS': [os.path.join(BASE_DIR, 'bokehapp/templates')], inside TEMPLATES contained\n settings.py\n\"\"\"\n\n# instantiate the flask app\napp = Flask(__name__)\n\n\n# create index page function\n# @app.route(\"/\")\n# def index():\n# return render_template(\"index.html\", context=datetime.now())\n\n\n@app.route(\"/\")\ndef index():\n session = pull_session(url=\"http://localhost:5006/random_generator\")\n bokeh_scripts = server_session(\n None, url=\"http://localhost:5006/random_generator\", session_id=session.id\n )\n return render_template(\"index.html\", bokeh_scripts=bokeh_scripts)\n\n\n# run the app with \"python app.py\"\n# set your debug = false when you are deploying your application\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"Rhotimie/Data-Visualization","sub_path":"Embedding Bokeh Plots in Websites/Bokeh_Server_Flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"1478589008","text":"import json\nimport requests\nimport websocket\nimport threading\nimport time\n\nAPI_KEY = 'YOUR_API_KEY_HERE' # NOTE: change to use your API_KEY\n\nws_type = 'books'\nfeed = 'stocks'\ntarget = 'realtime'\ninstruments = ['PETR4','BBAS3']\n\ndef get_new_token(API_KEY):\n url = f\"https://dataservices.btgpactualsolutions.com/api/v2/authenticate\"\n headersList = {\n \"Content-Type\": \"application/json\" \n }\n payload = json.dumps({\n \"api_key\": API_KEY,\n \"client_id\": f\"btgsolutions-client-python\"\n })\n response = requests.request(\"POST\", url, data=payload, headers=headersList)\n if response.status_code == 200:\n token = json.loads(response.text).get('AccessToken')\n if not token:\n 
raise Exception('Something has gone wrong while authenticating: No token as response.')\n else:\n response = json.loads(response.text)\n raise Exception(f\"ERROR HTTP Status {response.status_code}\")\n \n return token\n\ntoken = get_new_token(API_KEY=API_KEY)\n\nprint(token)\n\ndef on_open(ws):\n print(\"CONNECTION OPENED SUCCESSFULLY\")\n\ndef on_message(ws, message):\n print(f\"Received message: {message}\")\n\ndef on_error(ws, error):\n print(f\"Error : {error}\")\n\ndef on_close(ws, close_status_code, close_msg):\n print(f\"CONNECTION CLOSED {close_status_code} , {close_msg}\")\n\nws = websocket.WebSocketApp(\n url=\"wss://dataservices.btgpactualsolutions.com/stream/v2/marketdata/book/stocks\",\n on_open=on_open,\n on_message=on_message,\n on_error=on_error,\n on_close=on_close, \n header={\"Sec-WebSocket-Protocol\": token}\n)\n\nwst = threading.Thread(target=ws.run_forever)\nwst.daemon = True\nwst.start()\n\ntime.sleep(3)\n\navailable_instruments_msg = {\"action\": \"available_to_subscribe\"}\nws.send(json.dumps(available_instruments_msg))\n\nsubscription_msg = {\"action\":\"subscribe\", \"params\": [\"PETR4\", \"VALE3\", \"WDOZ22\"]}\nws.send(json.dumps(subscription_msg))\n\nwhile True:\n time.sleep(30)\n","repo_name":"BTG-Pactual-Solutions/btgsolutions-dataservices-python-client","sub_path":"examples/direct_connect.py","file_name":"direct_connect.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"5729733123","text":"import sys\r\n\r\ndef stringToIntegerArray(inputString):\r\n \"\"\"This function is used to transform a string array (the one we get as an argument) into an integer array\r\n \r\n Arguments:\r\n inputString {string} -- The input string that will be converted in an integer array\r\n \r\n Returns:\r\n array -- The integer array converted from the string\r\n \"\"\"\r\n res = []\r\n #For each character in our input array, we get the character, transform it into an integer and remove 48\r\n #which is the offset for numbers in the ASCII table.\r\n for i in range(len(inputString)) :\r\n res.append(ord(inputString[i]) - 48)\r\n return res\r\n\r\ndef checkNumberIntegrity(string):\r\n \"\"\"This function allows us to check if a credit card number given is valid or not\r\n \r\n Arguments:\r\n string {string} -- The credit card number given in argument\r\n \r\n Returns:\r\n None -- Raises an Exception if the number is invalid\r\n \"\"\"\r\n\r\n #If the input given is not 16 numbers long, we throw an exception.\r\n if len(string) != 16 :\r\n raise Exception(\"The credit card number you put is not 16 numbers long, it's only \" + str(len(string)))\r\n\r\n #We check every character of the string given to see if it is a number or not. If it's not, we throw an exception.\r\n if not string.isdigit():\r\n raise Exception('The credit card number you put is invalid !')\r\n\r\n\r\ndef luhnAddition(inputInt):\r\n \"\"\"This function takes in an integer and doubles it. 
If the result is equal or greater than 10, it adds up the digits.\r\n \r\n Arguments:\r\n inputInt {integer} -- This is the integer to be doubled by the function.\r\n \r\n Returns:\r\n integer -- Gives the transformed integer\r\n \"\"\"\r\n #If the number is greater than 4, doubling it will give a result equal or greater than 10 so we add the digits\r\n if(inputInt > 4):\r\n return 1 + (inputInt*2 % 10)\r\n #If it's not, we double it normally and send it back\r\n else:\r\n return inputInt * 2","repo_name":"haysberg/credithasher","sub_path":"cchashlib.py","file_name":"cchashlib.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"34841776617","text":"from operator import truediv\nfrom tkinter import Tk\nfrom tkinter.filedialog import askopenfilename, asksaveasfile\nimport time\nimport os\n\narquivo = None\n\ndef abrirArquivo():\n\n Tk().withdraw()\n\n try:\n arquivo = askopenfilename(filetypes=((\"Text files\", \"*.xml\"),))\n with open(arquivo, encoding='UTF-8') as arquivo:\n return str(arquivo.read())\n\n except (FileNotFoundError):\n print(\"Nenhum arquivo selecionado\")\n time.sleep(6)\n os.system('cls')\n\n\ndef mostrarArquivo(arquivo):\n print('---------- arquivo ----------')\n print(arquivo)\n print('-----------------------------')\n\n\ndef alterar(arquivo, ramal, senha):\n if arquivo is None:\n print(f'não tem nenhum arquivo para ser alterado')\n else:\n if(ramal is not None and senha is not None):\n arquivoFinal = trocarTexto(arquivo, '', '',\n ramal)\n arquivoFinal = trocarTexto(\n arquivoFinal, '', '', ramal)\n arquivoFinal = trocarTexto(arquivoFinal, '', '',\n ramal)\n arquivoFinal = trocarTexto(arquivoFinal, '', '',\n ramal)\n arquivoFinal = trocarTexto(arquivoFinal, '', '',\n ramal)\n arquivoFinal = trocarTexto(arquivoFinal, '', '',\n senha) \n print('alterado documento')\n time.sleep(2)\n os.system('cls')\n return arquivoFinal\n\ndef salvar(arquivo_url, texto_modificado):\n\n try:\n arquivo = asksaveasfile(defaultextension= '.xml').name\n\n with open(arquivo, mode='w', encoding=\"UTF-8\") as xml:\n xml.write(texto_modificado)\n except(AttributeError):\n print('nem arquivo selecionado para ser salvo')\n\n if arquivo_url == arquivo:\n os.remove(arquivo_url)\n\n\ndef posicaoInicial(texto, argumento, inicioBloco=None, finalBloco=None):\n return texto.index(argumento, inicioBloco, finalBloco)\n\n\ndef posicaoFinal(texto, argumento, inicioBloco=None, finalBloco=None):\n return texto.index(argumento, inicioBloco, finalBloco) + len(argumento)\n\n\ndef textoCompleta(texto, argumentoInicial, argunmentoFinal, inicioBloco, finalBloco):\n return texto[posicaoInicial(texto, argumentoInicial, inicioBloco, finalBloco): \n posicaoFinal(texto, argunmentoFinal, inicioBloco, finalBloco)]\n\n\ndef valorTexto(texto, argumentoInicial, argunmentoFinal):\n return texto[posicaoFinal(texto, argumentoInicial): \n posicaoInicial(texto, argunmentoFinal)]\n\n\ndef trocarTexto(texto, argumentoInicial, argunmentoFinal, novoValor,):\n inicioBloco = posicaoInicial(texto, '')\n finalBloco = posicaoFinal(texto, '')\n argumento = textoCompleta(texto, argumentoInicial, argunmentoFinal, inicioBloco, finalBloco)\n\n if argumento is not None:\n valor = valorTexto(argumento, argumentoInicial, argunmentoFinal)\n argumentoNovo = argumento.replace(valor, novoValor)\n return texto.replace(argumento, argumentoNovo)\n \n \ndef validarStr(textoInput):\n while True:\n texto = input(textoInput).strip()\n\n if texto == '':\n 
continue\n else:\n return texto\n","repo_name":"matheusbat28/cmd-xml","sub_path":"regra/funcao.py","file_name":"funcao.py","file_ext":"py","file_size_in_byte":3505,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"2924294463","text":"import argparse\nimport multiprocessing\nimport os\nimport random\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport tarfile\nimport tempfile\nimport time\n\n\nclass GLIMPS_Writer:\n \"\"\"Class to pipe stdout and stderr to parent process in asynchronous threads\"\"\"\n def __init__(self, stdout_messenger, stderr_messenger):\n self.stdout_messenger = stdout_messenger\n self.stderr_messenger = stderr_messenger\n\n def write(self, s):\n if __name__ == '__main__':\n sys.stdout.write(s)\n else:\n self.stdout_messenger.put(s)\n\n def error(self, s):\n if __name__ == '__main__':\n sys.stderr.write(s)\n else:\n self.stderr_messenger.put(s)\n\n\ndef check_negative(value):\n \"\"\"Checks for a negative integer value in command line argument\"\"\"\n int_value = int(value)\n if int_value < 0:\n raise argparse.ArgumentTypeError(\"%s is an invalid value. Use a positive integer.\" % value)\n return int_value\n\n\ndef check_arguments():\n \"\"\"Reads command line arguments and generates help text\"\"\"\n arguments = argparse.ArgumentParser(\n description=\"Gupta Lab Integrated Microbial Phylogeny and Supermatrix Pipeline\",\n epilog=\"Written by Mobolaji Adeolu (Adeolum@McMaster.ca), Department of Biochemistry and Biomedical Sciences, McMaster University. Copyright 2016.\")\n Gene_Select = arguments.add_mutually_exclusive_group()\n arguments.add_argument(\n \"-i\", \"--Input_Directory\",\n action=\"store\",\n type=str,\n default=os.path.join(os.path.dirname(sys.argv[0]), \"Input\"),\n required=False,\n metavar=\"Input Directory\",\n help=\"The directory in which input genome files are located\",\n )\n arguments.add_argument(\n \"-o\", \"--Output_Directory\",\n action=\"store\",\n type=str,\n default=os.path.join(os.path.dirname(sys.argv[0]), \"Output\"),\n required=False,\n metavar=\"Output Directory\",\n help=\"The directory in which Output files are to be placed\",\n )\n arguments.add_argument(\n \"-d\", \"--Protein_Distribution\",\n action=\"store\",\n type=float,\n default=0.8,\n required=False,\n choices=[x * 0.01 for x in range(0, 101)],\n metavar=\"Protein Distribution\",\n help=\"The minimum proportion of the input genomes in which a protein family must be present\",\n )\n arguments.add_argument(\n \"-t\", \"--Threads\",\n action=\"store\",\n type=check_negative,\n default=0,\n metavar=\"Number of Threads\",\n help=\"The number of threads to be used by the pipeline\",\n )\n Gene_Select.add_argument(\n \"-p\", \"--Target_Proteins\",\n action=\"store\",\n type=str,\n default=\"\",\n required=False,\n metavar=\"Target Proteins\",\n help=\"The location of a file containing the protein targets for protein family identification (optional)\",\n )\n Gene_Select.add_argument(\n \"-m\", \"--Marker_Proteins\",\n action=\"store\",\n type=str,\n default=\"\",\n required=False,\n choices=[\"actinobacteria\", \"alphaproteobacteria\", \"archaea\", \"bacteria_and_archaea\", \"bacteria\",\n \"bacteroidetes\", \"betaproteobacteria\", \"chlamydiae\", \"chloroflexi\", \"cyanobacteria\",\n \"deinococcus-thermus\", \"deltaproteobacteria\", \"epsilonproteobacteria\", \"firmicutes\",\n \"gammaproteobacteria\", \"spirochaetes\", \"thermotogae\"],\n metavar=\"PhyEco Marker Protein 
Family\",\n help=\"The predefined marker protein family set to be used for analysis (optional)\",\n )\n arguments.add_argument(\n \"--Single_Copy\",\n action=\"store_true\",\n default=False,\n required=False,\n help=\"Determines whether the pipeline utilizes only single copy homologs\",\n )\n arguments.add_argument(\n \"--PAMatrix\",\n action=\"store_true\",\n default=False,\n required=False,\n help=\"Determines whether the pipeline produces a PA Matrix\",\n )\n arguments.add_argument(\n \"--POCP\",\n action=\"store_true\",\n default=False,\n required=False,\n help=\"Determines whether the pipeline produces a POCP Matrix\",\n )\n arguments.add_argument(\n \"--AAI\",\n action=\"store_true\",\n default=False,\n required=False,\n help=\"Determines whether the pipeline produces an AAI Matrix\",\n )\n arguments.add_argument(\n \"--Fast_Cluster\",\n action=\"store_true\",\n default=False,\n required=False,\n help=\"Skips the HMM-based iterative clustering steps after CD-Hit during the core genome identification process\",\n )\n arguments.add_argument(\n \"--Fast_Phylogeny\",\n action=\"store_true\",\n default=False,\n required=False,\n help=\"Skips the RAxML based tree building step after FastTree\",\n )\n arguments.add_argument(\n \"--No_Tree\",\n action=\"store_true\",\n default=False,\n required=False,\n help=\"Skips all phylogenetic tree building steps\",\n )\n arguments.add_argument(\n \"--Polymorphic\",\n action=\"store_true\",\n default=False,\n required=False,\n help=\"Trims invariant sites from multiple sequence alignments\",\n )\n arguments.add_argument(\n \"-f\", \"--Alignment_Filtering\",\n action=\"store\",\n type=str,\n default=\"Trim\",\n required=False,\n choices=[\"Trim\", \"Weight\"],\n metavar=\"Alignment Filtering\",\n help=\"The method by which the alignments will be filtered\",\n )\n arguments.add_argument(\n \"--cdhit\",\n action=\"store\",\n type=str,\n default=\"cd-hit\",\n required=False,\n help=\"Path to the cd-hit executable\",\n )\n arguments.add_argument(\n \"--jackhmmer\",\n action=\"store\",\n type=str,\n default=\"jackhmmer\",\n required=False,\n help=\"Path to the jackhmmer executable\",\n )\n arguments.add_argument(\n \"--hmmbuild\",\n action=\"store\",\n type=str,\n default=\"hmmbuild\",\n required=False,\n help=\"Path to the hmmbuild executable\",\n )\n arguments.add_argument(\n \"--hmmsearch\",\n action=\"store\",\n type=str,\n default=\"hmmsearch\",\n required=False,\n help=\"Path to the hmmsearch executable\",\n )\n arguments.add_argument(\n \"--clustalo\",\n action=\"store\",\n type=str,\n default=\"clustalo\",\n required=False,\n help=\"Path to the clustalo executable\",\n )\n arguments.add_argument(\n \"--trimal\",\n action=\"store\",\n type=str,\n default=\"trimal\",\n required=False,\n help=\"Path to the trimal executable\",\n )\n arguments.add_argument(\n \"--fasttree\",\n action=\"store\",\n type=str,\n default=\"fasttree\",\n required=False,\n help=\"Path to the fasttree executable\",\n )\n arguments.add_argument(\n \"--raxml\",\n action=\"store\",\n type=str,\n default=\"raxml\",\n required=False,\n help=\"Path to the raxml executable\",\n )\n args = arguments.parse_args()\n return args\n\n\ndef make_dir(dir_name, Output):\n \"\"\"Make/overwrite folders\"\"\"\n if not os.path.exists(dir_name):\n os.mkdir(dir_name)\n else:\n shutil.rmtree(dir_name)\n count = 0\n while os.path.exists(dir_name) and count < 15:\n count += 1\n time.sleep(1)\n else:\n try:\n os.mkdir(dir_name)\n except OSError:\n Output.error(\"\\nUnable to access directory: \" + 
dir_name)\n sys.exit()\n\n\ndef Build_Output_Dirs(Output_Directory, stdout_messenger, stderr_messenger):\n \"\"\"Builds output directory structure\"\"\"\n Output = GLIMPS_Writer(stdout_messenger, stderr_messenger)\n Genome_Dir = os.path.join(Output_Directory, \"Data\", \"Genomes\")\n Protein_Dir = os.path.join(Output_Directory, \"Data\", \"Proteins\")\n Alignment_Dir = os.path.join(Output_Directory, \"Data\", \"Protein Alignments\")\n Concatenated_Dir = os.path.join(Output_Directory, \"Data\", \"Concatenated Alignments\")\n Tree_Dir = os.path.join(Output_Directory, \"Data\", \"Phylogenetic Trees\")\n Log_Dir = os.path.join(Output_Directory, \"Logs\")\n GLIMPSe_Output_Dir = os.path.join(Output_Directory, \"GLIMPSe Output\")\n Dependency_Dir = os.path.join(os.path.dirname(sys.argv[0]), \"Dependencies\")\n Marker_Dir = os.path.join(os.path.dirname(sys.argv[0]), \"PhyEco Marker Protein Families\")\n if not os.path.exists(Output_Directory):\n os.mkdir(Output_Directory)\n make_dir(os.path.join(Output_Directory, \"Data\"), Output)\n make_dir(Genome_Dir, Output)\n make_dir(Protein_Dir, Output)\n make_dir(Alignment_Dir, Output)\n make_dir(Concatenated_Dir, Output)\n make_dir(Tree_Dir, Output)\n make_dir(Log_Dir, Output)\n make_dir(GLIMPSe_Output_Dir, Output)\n return Genome_Dir, Protein_Dir, Alignment_Dir, Concatenated_Dir, Tree_Dir, Log_Dir, GLIMPSe_Output_Dir, Dependency_Dir, Marker_Dir\n\n\ndef Prepare_Dependencies(Dependency_Dir, stdout_messenger, stderr_messenger):\n \"\"\"Checks dependencies and sets dependency variables\"\"\"\n Output = GLIMPS_Writer(stdout_messenger, stderr_messenger)\n deps = check_arguments()\n Threads = deps.Threads\n with tempfile.TemporaryFile() as dump:\n try:\n subprocess.check_call(deps.cdhit, stdout=dump, stderr=dump)\n CDHIT = deps.cdhit\n except (subprocess.CalledProcessError, OSError):\n CDHIT = \"\"\n try:\n subprocess.check_call(deps.jackhmmer, stdout=dump, stderr=dump)\n JACKHMMER = deps.jackhmmer\n except (subprocess.CalledProcessError, OSError):\n JACKHMMER = \"\"\n try:\n subprocess.check_call(deps.hmmbuild, stdout=dump, stderr=dump)\n HMMBUILD = deps.hmmbuild\n except (subprocess.CalledProcessError, OSError):\n HMMBUILD = \"\"\n try:\n subprocess.check_call(deps.hmmsearch, stdout=dump, stderr=dump)\n HMMSEARCH = deps.hmmsearch\n except (subprocess.CalledProcessError, OSError):\n HMMSEARCH = \"\"\n try:\n subprocess.check_call(deps.clustalo, stdout=dump, stderr=dump)\n ClustalOmega = deps.clustalo\n except (subprocess.CalledProcessError, OSError):\n ClustalOmega = \"\"\n try:\n subprocess.check_call(deps.trimal, stdout=dump, stderr=dump)\n TrimAl = deps.trimal\n except (subprocess.CalledProcessError, OSError):\n TrimAl = \"\"\n try:\n subprocess.check_call([deps.fasttree, \"-expert\"], stdout=dump, stderr=dump)\n FastTree = deps.fasttree\n except (subprocess.CalledProcessError, OSError):\n FastTree = \"\"\n try:\n subprocess.check_call(deps.raxml, stdout=dump, stderr=dump)\n RAxML = deps.raxml\n except (subprocess.CalledProcessError, OSError):\n RAxML = \"\"\n if sys.platform.startswith(\"linux\"):\n OS_Dir = \"Linux\"\n if CDHIT == \"\":\n CDHIT = os.path.join(Dependency_Dir, OS_Dir, \"CD-HIT\", \"cd-hit\")\n if JACKHMMER == \"\":\n JACKHMMER = os.path.join(Dependency_Dir, OS_Dir, \"HMMer\", \"jackhmmer\")\n if HMMBUILD == \"\":\n HMMBUILD = os.path.join(Dependency_Dir, OS_Dir, \"HMMer\", \"hmmbuild\")\n if HMMSEARCH == \"\":\n HMMSEARCH = os.path.join(Dependency_Dir, OS_Dir, \"HMMer\", \"hmmsearch\")\n if ClustalOmega == \"\":\n ClustalOmega = 
os.path.join(Dependency_Dir, OS_Dir, \"Clustal Omega\", \"clustalo\")\n if TrimAl == \"\":\n TrimAl = os.path.join(Dependency_Dir, OS_Dir, \"TrimAl\", \"trimal\")\n if FastTree == \"\":\n FastTree = os.path.join(Dependency_Dir, OS_Dir, \"FastTree\", \"fasttree\")\n if RAxML == \"\":\n RAxML = os.path.join(Dependency_Dir, OS_Dir, \"RAxML\", \"raxml\")\n if Threads == 0:\n if isinstance(multiprocessing.cpu_count(), int):\n Threads = multiprocessing.cpu_count()\n else:\n Threads = 1\n elif sys.platform.startswith(\"darwin\"):\n OS_Dir = \"OSX\"\n if CDHIT == \"\":\n CDHIT = os.path.join(Dependency_Dir, OS_Dir, \"CD-HIT\", \"cd-hit\")\n if JACKHMMER == \"\":\n JACKHMMER = os.path.join(Dependency_Dir, OS_Dir, \"HMMer\", \"jackhmmer\")\n if HMMBUILD == \"\":\n HMMBUILD = os.path.join(Dependency_Dir, OS_Dir, \"HMMer\", \"hmmbuild\")\n if HMMSEARCH == \"\":\n HMMSEARCH = os.path.join(Dependency_Dir, OS_Dir, \"HMMer\", \"hmmsearch\")\n if ClustalOmega == \"\":\n ClustalOmega = os.path.join(Dependency_Dir, OS_Dir, \"Clustal Omega\", \"clustalo\")\n if TrimAl == \"\":\n TrimAl = os.path.join(Dependency_Dir, OS_Dir, \"TrimAl\", \"trimal\")\n if FastTree == \"\":\n FastTree = os.path.join(Dependency_Dir, OS_Dir, \"FastTree\", \"fasttree\")\n if RAxML == \"\":\n RAxML = os.path.join(Dependency_Dir, OS_Dir, \"RAxML\", \"raxml\")\n if Threads == 0:\n if isinstance(multiprocessing.cpu_count(), int):\n Threads = multiprocessing.cpu_count()\n else:\n Threads = 1\n elif sys.platform.startswith(\"win32\"):\n OS_Dir = \"Windows\"\n if CDHIT == \"\":\n CDHIT = os.path.join(Dependency_Dir, OS_Dir, \"CD-HIT\", \"cd-hit.exe\")\n if JACKHMMER == \"\":\n JACKHMMER = os.path.join(Dependency_Dir, OS_Dir, \"HMMer\", \"jackhmmer.exe\")\n if HMMBUILD == \"\":\n HMMBUILD = os.path.join(Dependency_Dir, OS_Dir, \"HMMer\", \"hmmbuild.exe\")\n if HMMSEARCH == \"\":\n HMMSEARCH = os.path.join(Dependency_Dir, OS_Dir, \"HMMer\", \"hmmsearch.exe\")\n if ClustalOmega == \"\":\n ClustalOmega = os.path.join(Dependency_Dir, OS_Dir, \"Clustal Omega\", \"clustalo.exe\")\n if TrimAl == \"\":\n TrimAl = os.path.join(Dependency_Dir, OS_Dir, \"TrimAl\", \"trimal.exe\")\n if FastTree == \"\":\n FastTree = os.path.join(Dependency_Dir, OS_Dir, \"FastTree\", \"fasttree.exe\")\n if RAxML == \"\":\n RAxML = os.path.join(Dependency_Dir, OS_Dir, \"RAxML\", \"raxml.exe\")\n if Threads == 0:\n if isinstance(multiprocessing.cpu_count(), int):\n Threads = multiprocessing.cpu_count()\n else:\n Threads = 1\n if CDHIT == \"\" or JACKHMMER == \"\" or HMMBUILD == \"\" or HMMSEARCH == \"\" or ClustalOmega == \"\" or TrimAl == \"\" or FastTree == \"\" or RAxML == \"\":\n Output.error(\"\\nOperating system unrecognized.\\n\")\n sys.exit()\n with tempfile.TemporaryFile() as dump:\n try:\n subprocess.check_call(CDHIT, stdout=dump, stderr=dump)\n except subprocess.CalledProcessError:\n pass\n except OSError:\n Output.error(\"\\nUnable to execute CD-Hit dependency. Check binaries and permissions.\\n\")\n sys.exit()\n try:\n subprocess.check_call(JACKHMMER, stdout=dump, stderr=dump)\n except subprocess.CalledProcessError:\n pass\n except OSError:\n Output.error(\"\\nUnable to execute JackHMMer dependency. Check binaries and permissions.\\n\")\n sys.exit()\n try:\n subprocess.check_call(HMMBUILD, stdout=dump, stderr=dump)\n except subprocess.CalledProcessError:\n pass\n except OSError:\n Output.error(\"\\nUnable to execute HMMBuild dependency. 
Check binaries and permissions.\\n\")\n sys.exit()\n try:\n subprocess.check_call(HMMSEARCH, stdout=dump, stderr=dump)\n except subprocess.CalledProcessError:\n pass\n except OSError:\n Output.error(\"\\nUnable to execute HMMSearch dependency. Check binaries and permissions.\\n\")\n sys.exit()\n try:\n subprocess.check_call(ClustalOmega, stdout=dump, stderr=dump)\n except subprocess.CalledProcessError:\n pass\n except OSError:\n Output.error(\"\\nUnable to execute Clustal Omega dependency. Check binaries and permissions.\\n\")\n sys.exit()\n try:\n subprocess.check_call(TrimAl, stdout=dump, stderr=dump)\n except subprocess.CalledProcessError:\n pass\n except OSError:\n Output.error(\"\\nUnable to execute TrimAl dependency. Check binaries and permissions.\\n\")\n sys.exit()\n try:\n subprocess.check_call([FastTree, \"-expert\"], stdout=dump, stderr=dump)\n except subprocess.CalledProcessError:\n pass\n except OSError:\n Output.error(\"\\nUnable to execute FastTree dependency. Check binaries and permissions.\\n\")\n sys.exit()\n try:\n subprocess.check_call(RAxML, stdout=dump, stderr=dump)\n except subprocess.CalledProcessError:\n pass\n except OSError:\n Output.error(\"\\nUnable to execute RAxML dependency. Check binaries and permissions.\\n\")\n sys.exit()\n return CDHIT, JACKHMMER, HMMBUILD, HMMSEARCH, ClustalOmega, TrimAl, FastTree, RAxML, Threads\n\n\n# Initial processing of genome files are handled by the following functions\ndef Replace_Chars(Text):\n \"\"\"Replaces special characters in genome names\"\"\"\n Replacement_Dict = {\" \": \"_\", \"'\": \"\", \":\": \"_\", \"/\": \"_\", \",\": \"_\", \"(\": \"_\", \")\": \"_\"}\n for target, replacement in Replacement_Dict.iteritems():\n Text = Text.replace(target, replacement)\n return Text\n\n# TODO Detect and tranlate nucleotide sequences\ndef Process_Genome_Files(Input_Dir, Genome_Dir, Log_Dir, Output):\n \"\"\"Copies genome files to data directory and assigns IDs to proteins\"\"\"\n Genome_Dictionary = {}\n AllGenomeProts = []\n Genomes = []\n Accepted_Filetypes = [\".fasta\", \".fsa\", \".faa\", \".fas\"]\n for fasta in os.listdir(Input_Dir):\n if os.path.splitext(fasta)[1].lower() in Accepted_Filetypes:\n Genomes.append(fasta)\n else:\n if len(os.path.splitext(fasta)[1]) > 0:\n Output.error(fasta + \" is of an unsupported file type and will not be included in the analysis\\n\")\n\n def Assign_Protein_ID(Input_Genome, Output_Directory, GenID):\n \"\"\"Local function to assign unique 10 digit ID to each protein\"\"\"\n ProteinID = 0\n with open(Input_Genome) as InGen:\n with open(os.path.join(Output_Directory, Replace_Chars(os.path.basename(Input_Genome))), \"w+\") as OutGen:\n Char_Count = 0\n for line in InGen:\n if line.startswith(\">\") and line.endswith(\"\\n\"):\n Char_Count = 0\n ProteinID += 1\n if ProteinID == 1:\n OutGen.write(\">\" + str(GenID).zfill(4) + str(ProteinID).zfill(6) + \"\\n\")\n else:\n OutGen.write(\"\\n\" + \">\" + str(GenID).zfill(4) + str(ProteinID).zfill(6) + \"\\n\")\n elif line[0].isalpha() and ProteinID > 0:\n line_content = []\n for char in line:\n if char in \"OUBZJX\":\n char = \"-\"\n if char.isalpha() and Char_Count < 60:\n Char_Count += 1\n line_content.append(char.upper())\n elif char.isalpha() and Char_Count >= 60:\n Char_Count = 1\n line_content.append(\"\\n\")\n line_content.append(char.upper())\n else:\n OutGen.write(\"\".join(line_content))\n\n for GenomeID, Genome_File in enumerate(Genomes, 1):\n no_space_file_name = Replace_Chars(Genome_File)\n Assign_Protein_ID(os.path.join(Input_Dir, 
Genome_File), Genome_Dir, GenomeID)\n Genome_Dictionary[str(GenomeID).zfill(4)] = os.path.splitext(no_space_file_name)[0]\n with open(os.path.join(Genome_Dir, no_space_file_name), \"r\") as Gen:\n if GenomeID == 1:\n AllGenomeProts += Gen.readlines()\n else:\n AllGenomeProts.append(\"\\n\")\n AllGenomeProts += Gen.readlines()\n Count = GenomeID - 1\n if Count > 1 and Count % 10 == 0:\n Output.write(\"%s input files completed initial processing...\\n\" % Count)\n else:\n with open(os.path.join(Genome_Dir, \"ConcatenatedGenomeFiles\"), \"w\") as AllGenomes:\n AllGenomes.writelines(AllGenomeProts)\n with open(os.path.join(Log_Dir, \"Genome_ID.tsv\"), \"w\") as GenDict:\n GenDict.write(\"Genome ID\\tGenome Name\\n\")\n for key in sorted(Genome_Dictionary.keys()):\n GenDict.write(key + \"\\t\" + Genome_Dictionary[key] + \"\\n\")\n return Genome_Dictionary\n\n\n# Identification of protein families using CD-HIT and HMMer is handled by the following functions\ndef CDHIT_Subprocess(CDHIT, Log_Dir, Output, Input_File, Output_File, Threshold, Word_Size):\n \"\"\"Runs subprocess for CDHIT\"\"\"\n CDHIT_Output = \"\"\n cmd = [CDHIT, \"-i\", Input_File, \"-o\", Output_File, \"-c\", Threshold, \"-n\", Word_Size, \"-s\", \"0.5\", \"-M\", \"0\", \"-T\",\n \"0\"]\n try:\n CDHIT_Output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as call_err:\n with open(os.path.join(Log_Dir, \"CDHIT.txt\"), \"a\") as CDHIT_Log:\n CDHIT_Log.write(CDHIT_Output)\n CDHIT_Log.write(\"\\n\" + \"Command: \" + str(call_err.cmd) + \"\\nError: \" + str(call_err.output))\n Output.error(\"\\nCD-HIT Error. Check Logs.\\n\")\n sys.exit()\n except OSError as os_err: # TODO More verbose OS Error\n with open(os.path.join(Log_Dir, \"CDHIT.txt\"), \"a\") as CDHIT_Log:\n CDHIT_Log.write(CDHIT_Output)\n CDHIT_Log.write(\"\\n\" + str(os_err.strerror))\n Output.error(\"\\nCD-HIT Error. 
Check Logs.\\n\")\n sys.exit()\n with open(os.path.join(Log_Dir, \"CDHIT.txt\"), \"a\") as CDHIT_Log:\n CDHIT_Log.write(CDHIT_Output)\n\n\ndef Parse_Clusters(Clusters, Input_File):\n \"\"\"Converts CDHIT output into a nested list of protein families\"\"\"\n with open(Input_File + \".clstr\", \"r\") as Cluster_File:\n Clusters_Reps = [x[0] for x in Clusters]\n Primary_Cluster = \"\"\n Additional_Clusters = []\n for line in Cluster_File:\n if line[0].isdigit():\n if line.endswith(\"*\\n\"):\n Primary_Cluster = re.search(\">..........\", line).group(0)\n else:\n Additional_Clusters.append(re.search(\">..........\", line).group(0))\n else:\n if Primary_Cluster != \"\":\n if Primary_Cluster not in Clusters_Reps:\n Clusters.append([Primary_Cluster])\n Clusters_Reps.append(Primary_Cluster)\n if len(Additional_Clusters) > 0:\n for clust in Additional_Clusters:\n Secondary_Cluster = clust\n if Primary_Cluster in Clusters_Reps and Secondary_Cluster in Clusters_Reps:\n Primary_Index = Clusters_Reps.index(Primary_Cluster)\n Secondary_Index = Clusters_Reps.index(Secondary_Cluster)\n Clusters[Primary_Index] += Clusters[Secondary_Index]\n del Clusters[Secondary_Index]\n del Clusters_Reps[Secondary_Index]\n else:\n Primary_Index = Clusters_Reps.index(Primary_Cluster)\n Clusters[Primary_Index].append(Secondary_Cluster)\n else:\n Additional_Clusters = []\n return Clusters\n\n\ndef Run_CDHIT(Genome_Dir, CDHIT, Log_Dir, ConcatenatedGenomeFile, Output):\n \"\"\"Completes iterative CDHIT clustering\"\"\"\n Clusters = []\n Input_File = ConcatenatedGenomeFile\n Output_File = os.path.join(Genome_Dir, \"CDHIT_Clusters_0.9\")\n Threshold = \"0.9\"\n Word_Size = \"5\"\n CDHIT_Subprocess(CDHIT, Log_Dir, Output, Input_File, Output_File, Threshold, Word_Size)\n Wait_Count = 0\n while not os.path.exists(Output_File):\n time.sleep(0.1)\n Wait_Count += 1\n if Wait_Count > 30:\n Output.error(\"\\nGLIMPSe pipeline Error. Check Logs.\\n\")\n sys.exit()\n Clusters = Parse_Clusters(Clusters, Output_File)\n Input_File = Output_File\n Output_File = os.path.join(Genome_Dir, \"CDHIT_Clusters_0.8\")\n Threshold = \"0.8\"\n Word_Size = \"5\"\n CDHIT_Subprocess(CDHIT, Log_Dir, Output, Input_File, Output_File, Threshold, Word_Size)\n Wait_Count = 0\n while not os.path.exists(Output_File):\n time.sleep(0.1)\n Wait_Count += 1\n if Wait_Count > 30:\n Output.error(\"\\nGLIMPSe pipeline Error. Check Logs.\\n\")\n sys.exit()\n Clusters = Parse_Clusters(Clusters, Output_File)\n Input_File = Output_File\n Output_File = os.path.join(Genome_Dir, \"CDHIT_Clusters_0.7\")\n Threshold = \"0.7\"\n Word_Size = \"4\"\n CDHIT_Subprocess(CDHIT, Log_Dir, Output, Input_File, Output_File, Threshold, Word_Size)\n Wait_Count = 0\n while not os.path.exists(Output_File):\n time.sleep(0.1)\n Wait_Count += 1\n if Wait_Count > 30:\n Output.error(\"\\nGLIMPSe pipeline Error. Check Logs.\\n\")\n sys.exit()\n Clusters = Parse_Clusters(Clusters, Output_File)\n Input_File = Output_File\n Output_File = os.path.join(Genome_Dir, \"CDHIT_Clusters_0.6\")\n Threshold = \"0.6\"\n Word_Size = \"4\"\n CDHIT_Subprocess(CDHIT, Log_Dir, Output, Input_File, Output_File, Threshold, Word_Size)\n Wait_Count = 0\n while not os.path.exists(Output_File):\n time.sleep(0.1)\n Wait_Count += 1\n if Wait_Count > 30:\n Output.error(\"\\nGLIMPSe pipeline Error. 
Check Logs.\\n\")\n sys.exit()\n Clusters = Parse_Clusters(Clusters, Output_File)\n return Clusters, Output_File\n\n\ndef Parse_Fasta(fasta):\n \"\"\"Converts Fasta file into a dictionary which uses fasta descriptions as keys\"\"\"\n Parsed = {}\n with open(fasta, \"r\") as Input:\n CurrentDesc = \"\"\n for line in Input:\n if line.startswith(\">\"):\n if line.count(\"|\") > 0:\n CurrentDesc = line.split(\"|\")[-1].split(\"[\")[0].strip()\n else:\n CurrentDesc = line.rstrip()\n Parsed[CurrentDesc] = \"\"\n else:\n Parsed[CurrentDesc] += line.rstrip()\n return Parsed\n\n\ndef Parse_HMM(HMM):\n \"\"\"Reads profile names from HMM profile files\"\"\"\n Names = []\n with open(HMM, \"r\") as Input:\n for line in Input:\n if line.startswith(\"NAME \"):\n Names.append(line[6:].strip())\n return Names\n\n\ndef Create_Prot_Files(Protein_Dir, Concat_Gen_Dict, Clusters):\n \"\"\"Generates fasta file from fasta dictionary\"\"\"\n for index, item in enumerate(Clusters, 1):\n if not os.path.exists(os.path.join(Protein_Dir, \"Protein_Family_\" + str(index) + \".fasta\")):\n with open(os.path.join(Protein_Dir, \"Protein_Family_\" + str(index) + \".fasta\"), \"w\") as ProtFile:\n for sub_item in item:\n ProtFile.write(sub_item + \"\\n\")\n ProtFile.write(Concat_Gen_Dict[sub_item] + \"\\n\")\n\n\ndef Remove_Singletons(Nested_List):\n \"\"\"Returns an output protein family list containing only multi-protein families\"\"\"\n copy_list = [x[:] for x in Nested_List]\n removal_list = []\n for item in copy_list:\n if len(item) < 2:\n removal_list.append(item)\n for item in removal_list:\n copy_list.remove(item)\n return copy_list\n\n\ndef Remove_Changed_Clusters(Directory, Old_Clusters, New_Clusters):\n \"\"\"Compares two lists of proteins to identify differences.\n Deletes protein files from the data directory that differ between the two lists.\"\"\"\n Rename_Dict = {}\n for ProtFam in Old_Clusters:\n Path = os.path.join(Directory, \"Protein_Family_\" + str(Old_Clusters.index(ProtFam) + 1))\n if ProtFam not in New_Clusters:\n if os.path.exists(Path + \".fasta\"):\n os.remove(Path + \".fasta\")\n if os.path.exists(Path):\n os.remove(Path)\n elif ProtFam in New_Clusters:\n if not Old_Clusters.index(ProtFam) == New_Clusters.index(ProtFam):\n if os.path.exists(Path + \".fasta\"):\n Rename_Dict[Old_Clusters.index(ProtFam)] = (os.path.basename(Path) + \".fasta\", \"Protein_Family_\" + str(New_Clusters.index(ProtFam) + 1) + \".fasta\")\n if os.path.exists(Path):\n Rename_Dict[str(Old_Clusters.index(ProtFam))] = (os.path.basename(Path), \"Protein_Family_\" + str(New_Clusters.index(ProtFam) + 1))\n for key in sorted(Rename_Dict.keys()):\n if os.path.exists(os.path.join(Directory, Rename_Dict[key][1])):\n os.remove(os.path.join(Directory, Rename_Dict[key][1]))\n shutil.move(os.path.join(Directory, Rename_Dict[key][0]), os.path.join(Directory, Rename_Dict[key][1]))\n\n\ndef Run_HMMBUILD(Alignment_Dir, Protein_Alignment, HMMBUILD, Output):\n \"\"\"Runs HMMBuild subprocess\"\"\"\n cmd = [HMMBUILD, \"--cpu\", \"1\", os.path.join(Alignment_Dir, os.path.splitext(Protein_Alignment)[0]),\n os.path.join(Alignment_Dir, Protein_Alignment)]\n HMMBUILD_Output = \"\"\n try:\n HMMBUILD_Output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as call_err:\n HMMBUILD_Output += \"\\n\" + \"Command: \" + str(call_err.cmd) + \"\\nError: \" + str(call_err.output)\n Output.error(\"\\nHMMBUILD Error. 
Check Logs.\\n\")\n except OSError as os_err:\n HMMBUILD_Output += \"\\n\" + str(os_err.strerror)\n Output.error(\"\\nHMMBUILD Error. Check Logs.\\n\")\n return HMMBUILD_Output\n\n\ndef Run_HMMSEARCH(Alignment_Dir, Protein_Alignment, index, Genome_Dir, Representative_File, HMMSEARCH, Output):\n \"\"\"Runs HMMSearch subprocess\"\"\"\n cmd = [HMMSEARCH, \"-E\", \"1e-5\", \"--incE\", \"1e-20\", \"--noali\", \"--cpu\", \"1\", os.path.join(Alignment_Dir, Protein_Alignment),\n os.path.join(Genome_Dir, Representative_File)]\n HMMSEARCH_Output = \"\"\n try:\n HMMSEARCH_Output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as call_err:\n HMMSEARCH_Output += \"\\n\" + \"Command: \" + str(call_err.cmd) + \"\\nError: \" + str(call_err.output)\n Output.error(\"\\nHMMSEARCH Error. Check Logs.\\n\")\n except OSError as os_err:\n HMMSEARCH_Output += \"\\n\" + str(os_err.strerror)\n Output.error(\"\\nHMMSEARCH Error. Check Logs.\\n\")\n Return_Tuple = (index, HMMSEARCH_Output)\n return Return_Tuple\n\n\ndef HMM_Clustering(Genome_Dir, Alignment_Dir, HMMBUILD, HMMSEARCH, CDHIT_Clusters, CDHIT_Clusters_Only_Multi, Log_Dir, Representative_File, Threads, Output):\n \"\"\"Runs HMMBuild and HMMSearch on all proteins in input directory and parses output\"\"\"\n Accepted_Filetypes = [\".fasta\", \".fsa\", \".faa\", \".fas\"]\n Alignments = []\n Profiles = []\n HMM_Clusters = [x[:] for x in CDHIT_Clusters]\n HMM_Cluster_Reps = [x[0] for x in HMM_Clusters]\n HMMBuildPool = multiprocessing.Pool(Threads)\n HMMBUILDQueue = []\n for Protein_Alignment in os.listdir(Alignment_Dir):\n if os.path.splitext(Protein_Alignment)[1].lower() in Accepted_Filetypes and not os.path.exists(\n os.path.splitext(Protein_Alignment)[0]):\n HMMBUILDQueue.append(\n HMMBuildPool.apply_async(Run_HMMBUILD, args=(Alignment_Dir, Protein_Alignment, HMMBUILD, Output)))\n Alignments.append(Protein_Alignment)\n Profiles.append(os.path.splitext(Protein_Alignment)[0])\n HMMBuildOut = [result.get() for result in HMMBUILDQueue]\n HMMBuildPool.close()\n HMMBuildPool.join()\n with open(os.path.join(Log_Dir, \"HMMBuild.txt\"), \"w\") as HMMer_Log:\n HMMer_Log.writelines(HMMBuildOut)\n Alignments.sort()\n Profiles.sort()\n Output.write(\"Performing profile HMM searches using HMMSearch...\\n\")\n HMMSearchPool = multiprocessing.Pool(Threads)\n HMMSEARCHQueue = []\n for index, Protein_Alignment in enumerate(Profiles):\n HMMSEARCHQueue.append(HMMSearchPool.apply_async(Run_HMMSEARCH, args=(Alignment_Dir, Protein_Alignment, index, Genome_Dir, Representative_File, HMMSEARCH, Output)))\n HMMSearchOut = [result.get() for result in HMMSEARCHQueue]\n HMMSearchPool.close()\n HMMSearchPool.join()\n with open(os.path.join(Log_Dir, \"HMMSearch.txt\"), \"w\") as HMMer_Log:\n HMMer_Log.writelines([Out[1] for Out in HMMSearchOut])\n Exit_Terms = [\"------ inclusion threshold ------\", \"[No hits detected that satisfy reporting thresholds]\",\n \"Domain annotation for each sequence:\"]\n Genome_Num = len([x for x in os.listdir(Genome_Dir) if os.path.splitext(x)[1].lower() in Accepted_Filetypes])\n for output in HMMSearchOut:\n Primary_Cluster = CDHIT_Clusters_Only_Multi[int(Profiles[output[0]].split(\"_\")[-1]) - 1][0]\n Primary_Cluster_Whole = CDHIT_Clusters_Only_Multi[int(Profiles[output[0]].split(\"_\")[-1]) - 1]\n try:\n Primary_Index = HMM_Clusters.index(Primary_Cluster_Whole)\n except (IndexError, ValueError):\n continue\n Primary_Genomes = [ID[:5] for ID in HMM_Clusters[Primary_Index]]\n for ID in Primary_Genomes:\n if 
Primary_Genomes.count(ID) > 1:\n Primary_Genomes.remove(ID)\n Primary_Len = len(Primary_Genomes)\n if Primary_Len < Genome_Num:\n for line in output[1].splitlines():\n if Exit_Terms[0] in line or Exit_Terms[1] in line or Exit_Terms[2] in line:\n break\n elif line[60:90].strip().isdigit():\n Secondary_Cluster = \">\" + line[60:90].strip()\n if Primary_Cluster != Secondary_Cluster:\n try:\n Secondary_Index = HMM_Cluster_Reps.index(Secondary_Cluster)\n Secondary_Genomes = [ID[:5] for ID in HMM_Clusters[Secondary_Index]]\n for ID in Secondary_Genomes:\n if Secondary_Genomes.count(ID) > 1:\n Secondary_Genomes.remove(ID)\n Secondary_Len = len(Secondary_Genomes)\n if Primary_Len + Secondary_Len < Genome_Num:\n HMM_Clusters[Primary_Index].extend(HMM_Clusters[Secondary_Index])\n del HMM_Clusters[Secondary_Index]\n del HMM_Cluster_Reps[Secondary_Index]\n break\n except (IndexError, ValueError):\n continue\n return HMM_Clusters\n\n\ndef HMM_Representative_File(Genome_Dir, Concat_Gen_Dict, HMM_Clusters):\n \"\"\"Generates a file with a single protein representing each protein family for accelerated searching\"\"\"\n with open(os.path.join(Genome_Dir, \"HMM_Clusters\"), \"w\") as Rep_File:\n for cluster in HMM_Clusters:\n Rep_File.write(cluster[0] + \"\\n\")\n Rep_File.write(Concat_Gen_Dict[cluster[0]] + \"\\n\")\n\n\ndef ClusterCoreProts(Genome_Dir, Protein_Dir, Alignment_Dir, CDHIT, ClustalOmega, HMMBUILD, HMMSEARCH, Log_Dir, Threads, Output, Fast_Cluster):\n \"\"\"Automated core protein family identification\"\"\"\n ConcatenatedGenomeFile = os.path.join(Genome_Dir, \"ConcatenatedGenomeFiles\")\n Output.write(\"Iteratively clustering proteins...\\n\")\n CDHIT_Clusters, Cluster_File = Run_CDHIT(Genome_Dir, CDHIT, Log_Dir, ConcatenatedGenomeFile, Output)\n Output.write(\"Iterative clustering step complete.\\n\")\n Concat_Gen_Dict = Parse_Fasta(ConcatenatedGenomeFile)\n CDHIT_Clusters_Only_Multi = Remove_Singletons(CDHIT_Clusters)\n Create_Prot_Files(Protein_Dir, Concat_Gen_Dict, CDHIT_Clusters_Only_Multi)\n if Fast_Cluster:\n return CDHIT_Clusters_Only_Multi, Concat_Gen_Dict\n else:\n Output.write(\"Aligning \" + str(len(os.listdir(Protein_Dir))) + \" putative protein families...\\n\")\n ParallelAlignment(Protein_Dir, Alignment_Dir, ClustalOmega, Log_Dir, Threads, True, [], Output)\n Output.write(\"Protein family alignment step complete.\\n\")\n Output.write(\"Building HMM profiles for protein families...\\n\")\n HMM_Clusters_1 = HMM_Clustering(Genome_Dir, Alignment_Dir, HMMBUILD, HMMSEARCH, CDHIT_Clusters,\n CDHIT_Clusters_Only_Multi, Log_Dir, os.path.basename(Cluster_File), Threads,\n Output)\n HMM_Clusters_Only_Multi_1 = Remove_Singletons(HMM_Clusters_1)\n Remove_Changed_Clusters(Protein_Dir, CDHIT_Clusters_Only_Multi, HMM_Clusters_Only_Multi_1)\n Remove_Changed_Clusters(Alignment_Dir, CDHIT_Clusters_Only_Multi, HMM_Clusters_Only_Multi_1)\n Create_Prot_Files(Protein_Dir, Concat_Gen_Dict, HMM_Clusters_Only_Multi_1)\n Output.write(\"Aligning modified protein families...\\n\")\n ParallelAlignment(Protein_Dir, Alignment_Dir, ClustalOmega, Log_Dir, Threads, True, [], Output)\n HMM_Representative_File(Genome_Dir, Concat_Gen_Dict, HMM_Clusters_1)\n Output.write(\"Building HMM profiles for modified protein families...\\n\")\n HMM_Clusters_2 = HMM_Clustering(Genome_Dir, Alignment_Dir, HMMBUILD, HMMSEARCH, HMM_Clusters_1,\n HMM_Clusters_Only_Multi_1, Log_Dir, \"HMM_Clusters\", Threads, Output)\n HMM_Clusters_Only_Multi_2 = Remove_Singletons(HMM_Clusters_2)\n Remove_Changed_Clusters(Protein_Dir, 
HMM_Clusters_Only_Multi_1, HMM_Clusters_Only_Multi_2)\n    Remove_Changed_Clusters(Alignment_Dir, HMM_Clusters_Only_Multi_1, HMM_Clusters_Only_Multi_2)\n    Create_Prot_Files(Protein_Dir, Concat_Gen_Dict, HMM_Clusters_Only_Multi_2)\n    return HMM_Clusters_Only_Multi_2, Concat_Gen_Dict\n\n\ndef Run_JACKHMMER(Input_Fasta_Dict, Target_Proteins, ConcatenatedGenomeFile, JACKHMMER, Log_Dir, Output):\n    \"\"\"Runs JACKHMMER using the input fasta file and parses output\"\"\"\n    Protein_Fams = {}\n    JACKHMMER_Output = \"\"\n    cmd = [JACKHMMER, \"-E\", \"1e-5\", \"--incE\", \"1e-20\", \"--noali\", Target_Proteins, ConcatenatedGenomeFile]\n    try:\n        JACKHMMER_Output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n    except subprocess.CalledProcessError as call_err:\n        with open(os.path.join(Log_Dir, \"JACKHMMER.txt\"), \"a\") as HMMer_Log:\n            HMMer_Log.write(JACKHMMER_Output)\n            HMMer_Log.write(\"\\n\" + \"Command: \" + str(call_err.cmd) + \"\\nError: \" + str(call_err.output))\n        Output.error(\"\\nJACKHMMER Error. Check Logs.\\n\")\n    except OSError as os_err:\n        with open(os.path.join(Log_Dir, \"JACKHMMER.txt\"), \"a\") as HMMer_Log:\n            HMMer_Log.write(JACKHMMER_Output)\n            HMMer_Log.write(\"\\n\" + str(os_err.strerror))\n        Output.error(\"\\nJACKHMMER Error. Check Logs.\\n\")\n    with open(os.path.join(Log_Dir, \"JACKHMMER.txt\"), \"a\") as HMMer_Log:\n        HMMer_Log.write(JACKHMMER_Output)\n    Exit_Terms = [\"------ inclusion threshold ------\", \"[No hits detected that satisfy reporting thresholds]\"]\n    Prot_Found = False\n    Prot_Name = \"\"\n    for line in JACKHMMER_Output.splitlines():\n        if \"Query:\" in line or \"Description:\" in line:\n            Prot_Found = True\n            for key in Input_Fasta_Dict.keys():\n                if key.lstrip(\">\") in line or \" \".join(key.lstrip(\">\").split(\" \")[1:]) in line and \".\" in key.lstrip(\">\").split(\" \")[0]:\n                    Prot_Name = Replace_Chars(key.lstrip(\">\"))\n                    if Prot_Name in Protein_Fams.keys() and Protein_Fams[Prot_Name] == []:\n                        count = 2\n                        for x in range(10000):\n                            if Prot_Name in Protein_Fams.keys() and Prot_Name + \"_\" + str(\n                                    count) not in Protein_Fams.keys():\n                                Prot_Name += \"_\" + str(count)\n                                break\n                            else:\n                                count += 1\n                    if Prot_Name not in Protein_Fams.keys():\n                        Protein_Fams[Prot_Name] = []\n                    break\n        elif Exit_Terms[0] in line or Exit_Terms[1] in line:\n            Prot_Found = False\n        elif \"@@ Round:\" in line:\n            Prot_Found = True\n        elif line[60:90].strip().isdigit() and Prot_Found:\n            for ID in Protein_Fams[Prot_Name]:\n                if line[60:].startswith(ID[1:5]):\n                    break\n            else:\n                Protein_Fams[Prot_Name].append(\">\" + line[60:90].strip())\n    return Protein_Fams\n\n\ndef Find_Proteins(Genome_Dir, Protein_Dir, GLIMPSe_Output_Dir, Genome_Dictionary, Target_Proteins, JACKHMMER, PAMatrix, Log_Dir, Output):\n    \"\"\"Protein family identification based on input fasta files\"\"\"\n    ConcatenatedGenomeFile = os.path.join(Genome_Dir, \"ConcatenatedGenomeFiles\")\n    Concat_Gen_Dict = Parse_Fasta(ConcatenatedGenomeFile)\n    Input_Fasta_Dict = Parse_Fasta(Target_Proteins)\n    Output.write(\"Performing JACKHMMER searches for protein families...\\n\")\n    Protein_Fams = Run_JACKHMMER(Input_Fasta_Dict, Target_Proteins, ConcatenatedGenomeFile, JACKHMMER, Log_Dir, Output)\n    Create_Named_Prot_Files(Protein_Dir, Concat_Gen_Dict, Protein_Fams, Output)\n    if PAMatrix:\n        Output.write(\"Producing Presence Absence Matrix...\\n\")\n        Build_PAMatrix(Protein_Fams, GLIMPSe_Output_Dir, Genome_Dictionary, Output)\n    return Protein_Fams\n\n\ndef Marker_HMMSEARCH(Marker_Names, Marker_File, ConcatenatedGenomeFile, HMMSEARCH, Log_Dir, Output):\n    \"\"\"Runs 
HMMSearch using PhyEco Markers and parses output\"\"\"\n    Protein_Fams = {}\n    HMMSEARCH_Output = \"\"\n    cmd = [HMMSEARCH, \"-E\", \"1e-5\", \"--incE\", \"1e-20\", \"--noali\", Marker_File, ConcatenatedGenomeFile]\n    try:\n        HMMSEARCH_Output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n    except subprocess.CalledProcessError as call_err:\n        with open(os.path.join(Log_Dir, \"HMMSEARCH.txt\"), \"a\") as HMMer_Log:\n            HMMer_Log.write(HMMSEARCH_Output)\n            HMMer_Log.write(\"\\n\" + \"Command: \" + str(call_err.cmd) + \"\\nError: \" + str(call_err.output))\n        Output.error(\"\\nHMMSEARCH Error. Check Logs.\\n\")\n    except OSError as os_err:\n        with open(os.path.join(Log_Dir, \"HMMSEARCH.txt\"), \"a\") as HMMer_Log:\n            HMMer_Log.write(HMMSEARCH_Output)\n            HMMer_Log.write(\"\\n\" + str(os_err.strerror))\n        Output.error(\"\\nHMMSEARCH Error. Check Logs.\\n\")\n    with open(os.path.join(Log_Dir, \"HMMSEARCH.txt\"), \"a\") as HMMer_Log:\n        HMMer_Log.write(HMMSEARCH_Output)\n    Exit_Terms = [\"------ inclusion threshold ------\", \"[No hits detected that satisfy reporting thresholds]\", \"Domain annotation for each sequence:\"]\n    Prot_Found = False\n    Prot_Name = \"\"\n    for line in HMMSEARCH_Output.splitlines():\n        if \"Query:\" in line:\n            Prot_Found = True\n            for Name in Marker_Names:\n                if re.search(\" \\w.+? \", line).group(0).strip() in Name:\n                    Prot_Name = Replace_Chars(Name)\n                    if Prot_Name not in Protein_Fams.keys():\n                        Protein_Fams[Prot_Name] = []\n                    break\n        elif Exit_Terms[0] in line or Exit_Terms[1] in line:\n            Prot_Found = False\n        elif line[60:90].strip().isdigit() and Prot_Found:\n            for ID in Protein_Fams[Prot_Name]:\n                if line[60:].startswith(ID[1:5]):\n                    break\n            else:\n                Protein_Fams[Prot_Name].append(\">\" + line[60:90].strip())\n    return Protein_Fams\n\n\ndef Find_Marker_Proteins(Genome_Dir, Protein_Dir, GLIMPSe_Output_Dir, Genome_Dictionary, HMMSEARCH, PAMatrix, Log_Dir, Marker_Dir, Marker_Proteins, Output):\n    \"\"\"Protein family identification using PhyEco marker sets\"\"\"\n    Marker_File = os.path.join(Marker_Dir, Marker_Proteins + \".hmm\")\n    if not os.path.exists(Marker_File):\n        Output.error(\"Cannot locate selected PhyEco marker set HMM profiles.\\n\")\n        sys.exit()\n    ConcatenatedGenomeFile = os.path.join(Genome_Dir, \"ConcatenatedGenomeFiles\")\n    Concat_Gen_Dict = Parse_Fasta(ConcatenatedGenomeFile)\n    Marker_Names = Parse_HMM(Marker_File)\n    Output.write(\"Performing HMMSearch searches for protein families...\\n\")\n    Protein_Fams = Marker_HMMSEARCH(Marker_Names, Marker_File, ConcatenatedGenomeFile, HMMSEARCH, Log_Dir, Output)\n    Create_Named_Prot_Files(Protein_Dir, Concat_Gen_Dict, Protein_Fams, Output)\n    if PAMatrix:\n        Output.write(\"Producing Presence Absence Matrix...\\n\")\n        Build_PAMatrix(Protein_Fams, GLIMPSe_Output_Dir, Genome_Dictionary, Output)\n    return Protein_Fams\n\n\ndef Create_Named_Prot_Files(Protein_Dir, Concat_Gen_Dict, Protein_Fams, Output):\n    \"\"\"Creates output protein files for named protein families\"\"\"\n    for item in Protein_Fams.keys():\n        if len(Protein_Fams[item]) > 1:\n            with open(os.path.join(Protein_Dir, item + \".fasta\"), \"w\") as ProtFile:\n                for sub_item in Protein_Fams[item]:\n                    ProtFile.write(sub_item + \"\\n\")\n                    ProtFile.write(Concat_Gen_Dict[sub_item] + \"\\n\")\n        else:\n            Output.error(\n                \"Fewer than 2 copies of \" + item.lstrip(\">\") + \" were identified in the input genomes. 
\" + item.lstrip(\n \">\") + \" will not be included in further analysis.\\n\")\n\n\ndef Build_PAMatrix(HMM_Clusters, GLIMPSe_Output_Dir, Genome_Dictionary, Output):\n \"\"\"Generates PA Matrix\"\"\"\n ProtMatrix = {}\n IDs = sorted(Genome_Dictionary.keys())\n if isinstance(HMM_Clusters, list):\n Cluster_Dict = {}\n for num, item in enumerate(HMM_Clusters, 1):\n Cluster_Dict[\"Protein_Family_\" + str(num)] = item\n elif isinstance(HMM_Clusters, dict):\n Cluster_Dict = HMM_Clusters.copy()\n else:\n Output.error(\"\\nGLIMPSe pipeline Error. Check Logs.\\n\")\n sys.exit()\n with open(os.path.join(GLIMPSe_Output_Dir, \"PA_Matrix.tsv\"), \"w\") as PAMatrix:\n OutHeader = \"\"\n for ID in IDs:\n ProtMatrix[ID] = \"\"\n OutHeader = OutHeader + \"\\t\" + Genome_Dictionary[ID]\n OutRows = []\n for Protein_Fam in Cluster_Dict.keys():\n OutRow = \"\\n\" + Protein_Fam\n ProtDist = 0\n for ID in IDs:\n for Protein in Cluster_Dict[Protein_Fam]:\n if Protein.startswith(\">\" + str(ID)):\n OutRow = OutRow + \"\\t\" + \"1\"\n ProtDist += 1\n break\n else:\n OutRow = OutRow + \"\\t\" + \"0\"\n else:\n OutRows.append((ProtDist, OutRow))\n OutRows.sort()\n OutRows.reverse()\n PAMatrix.write(OutHeader)\n for item in OutRows:\n for index, binary in enumerate(item[1].split(\"\\t\")):\n if index > 0:\n ProtMatrix[IDs[index - 1]] = ProtMatrix[IDs[index - 1]] + binary\n if item[0] > 1:\n PAMatrix.write(item[1])\n return ProtMatrix, OutHeader\n\n\ndef Build_POCP(GLIMPSe_Output_Dir, Genome_Dictionary, ProtMatrix, OutHeader):\n \"\"\"Generates Percent of Shared Proteins Matrix.\n Uses output from PA Matrix.\"\"\"\n IDs = sorted(Genome_Dictionary.keys())\n with open(os.path.join(GLIMPSe_Output_Dir, \"PoCP_Matrix.tsv\"), \"w\") as PoCPMatrix:\n OutRows = []\n for ID_1 in IDs:\n OutRow = \"\\n\" + Genome_Dictionary[ID_1]\n for ID_2 in IDs:\n Match = 0\n length_1 = ProtMatrix[ID_1].count(\"1\")\n length_2 = ProtMatrix[ID_2].count(\"1\")\n Average_lenght = (float(length_1) + float(length_2)) / 2\n for index, binary in enumerate(ProtMatrix[ID_1]):\n if binary == \"1\" and ProtMatrix[ID_2][index] == \"1\":\n Match += 1\n OutRow = OutRow + \"\\t\" + str(float(Match) / float(Average_lenght))\n else:\n OutRows.append(OutRow)\n else:\n PoCPMatrix.write(OutHeader)\n PoCPMatrix.writelines(OutRows)\n\n\ndef Protein_Family_Filter(Protein_Dir, Alignment_Dir, Concat_Gen_Dict, HMM_Clusters, GLIMPSe_Output_Dir,\n Genome_Dictionary, PAMatrix, POCP, Single_Copy, Output):\n \"\"\"Generates protein matricies and filters paralogs/single copy\"\"\"\n Accepted_Clusters = [x[:] for x in HMM_Clusters]\n ProtMatrix, OutHeader = {}, \"\"\n if PAMatrix:\n Output.write(\"Producing Presence Absence Matrix...\\n\")\n ProtMatrix, OutHeader = Build_PAMatrix(HMM_Clusters, GLIMPSe_Output_Dir, Genome_Dictionary, Output)\n elif not PAMatrix and POCP:\n ProtMatrix, OutHeader = Build_PAMatrix(HMM_Clusters, GLIMPSe_Output_Dir, Genome_Dictionary, Output)\n os.remove(os.path.join(GLIMPSe_Output_Dir, \"PA_Matrix.tsv\"))\n if POCP:\n Output.write(\"Producing Percentage of Conserved Protein Family Matrix...\\n\")\n Build_POCP(GLIMPSe_Output_Dir, Genome_Dictionary, ProtMatrix, OutHeader)\n if Single_Copy:\n Removed_Clusters = []\n for item in Accepted_Clusters:\n IDs_Only = [x[1:5] for x in item]\n for ID in item:\n if IDs_Only.count(ID[1:5]) > 1:\n Removed_Clusters.append(item)\n break\n for item in Removed_Clusters:\n Accepted_Clusters[Accepted_Clusters.index(item)] = []\n elif not Single_Copy:\n for item in Accepted_Clusters:\n Removed_IDs = []\n Seen_IDs = []\n 
for ID in item:\n                if ID[1:5] not in Seen_IDs:\n                    Seen_IDs.append(ID[1:5])\n                else:\n                    Removed_IDs.append(ID)\n            for ID in Removed_IDs:\n                item.remove(ID)\n    Accepted_Clusters = Remove_Singletons(Accepted_Clusters)\n    Remove_Changed_Clusters(Protein_Dir, HMM_Clusters, Accepted_Clusters)\n    Remove_Changed_Clusters(Alignment_Dir, HMM_Clusters, Accepted_Clusters)\n    Create_Prot_Files(Protein_Dir, Concat_Gen_Dict, Accepted_Clusters)\n    return Accepted_Clusters\n\n\ndef Determine_Protein_Distribution(Protein_Distribution, Protein_Clusters, Genome_Dictionary, Output):\n    \"\"\"Assesses the distribution of protein families among genomes\"\"\"\n    Accepted_Proteins = []\n    Genome_Number = len(Genome_Dictionary.keys())\n    if isinstance(Protein_Clusters, list):\n        Cluster_Dict = {}\n        for num, item in enumerate(Protein_Clusters, 1):\n            Cluster_Dict[\"Protein_Family_\" + str(num)] = item\n    elif isinstance(Protein_Clusters, dict):\n        Cluster_Dict = Protein_Clusters.copy()\n    else:\n        Output.error(\"\\nGLIMPSe pipeline Error. Check Logs.\\n\")\n        sys.exit()\n    Protein_Names = Cluster_Dict.keys()\n    for Protein_Name in Protein_Names:\n        if len(Cluster_Dict[Protein_Name]) >= Genome_Number * Protein_Distribution:\n            Accepted_Proteins.append(str(Protein_Name) + \".fasta\")\n    if len(Accepted_Proteins) < 1:\n        Output.error(\"\\nNo identified proteins meet protein distribution threshold\\n\")\n        sys.exit()\n    else:\n        Prot_Num = len(Accepted_Proteins)\n        Output.write(\"GLIMPS pipeline has identified \" + str(Prot_Num) + \" protein(s) which meet all selected criteria for core genome.\\n\")\n    return Accepted_Proteins\n\n\n# Alignment and trimming of proteins are handled by the following functions\ndef Calculate_AIs(Alignment_Dir, Alignment, AIs):\n    \"\"\"Calculates Amino Acid Identity for a given amino acid alignment\"\"\"\n    AI_Dict = {}\n    Percent_ID = 0.0\n    Aligned_Seqs = Parse_Fasta(os.path.join(Alignment_Dir, Alignment))\n    Sorted_Seqs = sorted(Aligned_Seqs.keys())\n    for index_1, key_1 in enumerate(Sorted_Seqs):\n        for index_2 in range(index_1 + 1,len(Sorted_Seqs)):\n            key_2 = Sorted_Seqs[index_2]\n            for key in AIs.keys():\n                if key_1[1:5] in key and key_2[1:5] in key:\n                    identical = 0\n                    length_1 = 0\n                    length_2 = 0\n                    for index, char in enumerate(Aligned_Seqs[key_1]):\n                        if char == Aligned_Seqs[key_2][index]:\n                            if char != \"-\":\n                                identical += 1\n                        if char != \"-\":\n                            length_1 += 1\n                        if Aligned_Seqs[key_2][index] != \"-\":\n                            length_2 += 1\n                    Percent_ID = float(identical) / float(min(length_1, length_2))\n                    AI_Dict[key] = Percent_ID\n                    break\n    Return_List = [Alignment, AI_Dict]\n    return Return_List\n\n\ndef Calculate_AAI(Alignment_Dir, Genome_Dictionary, Log_Dir, GLIMPSe_Output_Dir, Threads):\n    \"\"\"Generates AAI matrix\"\"\"\n    IDs = sorted(Genome_Dictionary.keys())\n    AIs = {}\n    for ID_1 in IDs:\n        for ID_2 in IDs:\n            if IDs.index(ID_2) > IDs.index(ID_1):\n                AIs[(ID_1, ID_2)] = []\n    Accepted_Filetypes = [\".fasta\", \".fsa\", \".faa\", \".fas\"]\n    AIPool = multiprocessing.Pool(Threads)\n    AIQueue = []\n    for Alignment in os.listdir(Alignment_Dir):\n        if os.path.splitext(Alignment)[1].lower() in Accepted_Filetypes:\n            AIQueue.append(AIPool.apply_async(func=Calculate_AIs, args=(Alignment_Dir, Alignment, AIs)))\n    AIOut = [result.get() for result in AIQueue]\n    AIPool.close()\n    AIPool.join()\n    for result in AIOut:\n        Align = result[0]\n        for key in result[1].keys():\n            AIs[key].append((Align, result[1][key]))\n    AAIs = {}\n    for key in sorted(AIs.keys()):\n        AI_List = []\n        total = float(0)\n        if len(AIs[key]) < 1:\n            AAIs[key] = \"N/A\"\n        else:\n            for item in AIs[key]:\n                
AI_List.append(item[1])\n total += item[1]\n Average = total / float(len(AI_List))\n AAIs[key] = Average\n AAI_Matrix = [\"\"]\n for ID in IDs:\n AAI_Matrix[0] += \"\\t\" + Genome_Dictionary[ID]\n AAI_Matrix.insert(IDs.index(ID) + 1, \"\\n\" + Genome_Dictionary[ID])\n for key_1 in sorted(AAIs.keys()):\n if ID == key_1[1]:\n AAI_Matrix[IDs.index(ID) + 1] += \"\\t\" + str(AAIs[key_1])\n else:\n AAI_Matrix[IDs.index(ID) + 1] += \"\\t\" + \"1\"\n for key_2 in sorted(AAIs.keys()):\n if ID == key_2[0]:\n AAI_Matrix[IDs.index(ID) + 1] += \"\\t\" + str(AAIs[key_2])\n with open(os.path.join(Log_Dir, \"Amino Acid Identities.tsv\"), \"w\") as Log:\n Log.write(\"Genome 1\\tGenome 2\\tAmino Acid Identities\\n\")\n for key in sorted(AIs.keys()):\n Log.write(Genome_Dictionary[key[0]] + \"\\t\" + Genome_Dictionary[key[1]])\n for item in AIs[key]:\n Log.write(\"\\t\" + os.path.splitext(item[0])[0] + \": \" + str(item[1]))\n else:\n Log.write(\"\\n\")\n with open(os.path.join(GLIMPSe_Output_Dir, \"AAI_Matrix.tsv\"), \"w\") as Out:\n Out.writelines(AAI_Matrix)\n\n\ndef Align_Proteins(Protein_Dir, Protein_File, Alignment_Dir, ClustalOmega, Count, First_Run, Output):\n \"\"\"Runs ClustalOmega\"\"\"\n ClustalOmega_Output = \"\"\n Accepted_Filetypes = [\".fasta\", \".fsa\", \".faa\", \".fas\"]\n if os.path.splitext(Protein_File)[1].lower() in Accepted_Filetypes:\n cmd = [ClustalOmega, \"-v\", \"--force\", \"--threads=1\", \"-i\", os.path.join(Protein_Dir, Protein_File), \"-o\",\n os.path.join(Alignment_Dir, Protein_File)]\n try:\n ClustalOmega_Output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as call_err:\n ClustalOmega_Output += \"\\n\" + \"Command: \" + str(call_err.cmd) + \"\\nError: \" + str(call_err.output)\n Output.error(\"\\nClustal Omega Error. Check Logs.\\n\")\n return ClustalOmega_Output\n except OSError as os_err:\n ClustalOmega_Output += \"\\n\" + str(os_err.strerror)\n Output.error(\"\\nClustal Omega Error. 
Check Logs.\\n\")\n return ClustalOmega_Output\n if not First_Run:\n if Count % 25 == 0:\n Output.write(\"Alignment of %d Protein families complete...\\n\" % Count)\n return ClustalOmega_Output\n\n\ndef ParallelAlignment(Protein_Dir, Alignment_Dir, ClustalOmega, Log_Dir, Threads, First_Run, Accepted_Proteins, Output):\n \"\"\"Manages parallel instances of ClustalOmega\"\"\"\n AlignPool = multiprocessing.Pool(Threads)\n ClustalQueue = []\n if len(Accepted_Proteins) > 0:\n for Count, Accepted_Protein in enumerate(Accepted_Proteins, 1):\n ClustalQueue.append(AlignPool.apply_async(func=Align_Proteins, args=(Protein_Dir, Accepted_Protein, Alignment_Dir, ClustalOmega, Count, First_Run, Output)))\n ClustalOut = [result.get() for result in ClustalQueue]\n AlignPool.close()\n AlignPool.join()\n with open(os.path.join(Log_Dir, \"ClustalOmega.txt\"), \"a\") as Clustal_Log:\n Clustal_Log.writelines(ClustalOut)\n else:\n Accepted_Filetypes = [\".fasta\", \".fsa\", \".faa\", \".fas\"]\n Accepted_Proteins_Files = []\n for Protein_File in os.listdir(Protein_Dir):\n if os.path.splitext(Protein_File)[1].lower() in Accepted_Filetypes and not os.path.exists(os.path.join(Alignment_Dir, Protein_File)):\n Accepted_Proteins_Files.append(Protein_File)\n for Count, Accepted_Proteins_File in enumerate(Accepted_Proteins_Files, 1):\n ClustalQueue.append(AlignPool.apply_async(func=Align_Proteins, args=(Protein_Dir, Accepted_Proteins_File, Alignment_Dir, ClustalOmega, Count, First_Run, Output)))\n ClustalOut = [result.get() for result in ClustalQueue]\n AlignPool.close()\n AlignPool.join()\n with open(os.path.join(Log_Dir, \"ClustalOmega.txt\"), \"a\") as Clustal_Log:\n Clustal_Log.writelines(ClustalOut)\n\n\n# Not used. Based on the weighted TrimAl algorithm described in Chang, Di Tommaso, & Notredame (2014).\ndef Create_Weighted_Alignments(Alignment_Dir, Accepted_Proteins, TrimAl, Output):\n \"\"\"Generates weighted alignments\"\"\"\n Alignment_Length = 0\n for Alignment_File in Accepted_Proteins:\n try:\n TrimAl_Output = subprocess.check_output([\n TrimAl,\n \"-in\",\n os.path.join(Alignment_Dir, Alignment_File),\n \"-sgc\"\n ])\n except subprocess.CalledProcessError as call_err:\n Output.error(\"\\nTrimAl Error.\\n\")\n Output.error(\"\\n\" + \"Command: \" + str(call_err.cmd) + \"\\nError: \" + str(call_err.output))\n sys.exit()\n except OSError as os_err:\n Output.error(\"\\nTrimAl Error.\\n\")\n Output.error(\"\\nError: \" + str(os_err.strerror))\n sys.exit()\n seq_score = []\n for line in TrimAl_Output.splitlines():\n score_str = re.search(\"(\\t\\t).*\\t(.*)\", line)\n if score_str is not None:\n score_flt = float(score_str.group(2).strip())\n column_multiplier = round(10 * 0.9 * score_flt + 1)\n seq_score.append(column_multiplier)\n with open(os.path.join(Alignment_Dir, Alignment_File), \"r\") as Unweighted_Alignment:\n All_Sequences_In_Alignment = {}\n Sequence_Name = \"\"\n for line in Unweighted_Alignment:\n if line.startswith(\">\"):\n Sequence_Name = line\n All_Sequences_In_Alignment[Sequence_Name] = \"\"\n else:\n All_Sequences_In_Alignment[Sequence_Name] += line.strip()\n with open(os.path.join(Alignment_Dir, \"Weighted_\" + Alignment_File), \"w\") as Weighted_Alignment:\n First_Line = True\n for Sequence_Key in All_Sequences_In_Alignment.keys():\n if First_Line:\n Weighted_Alignment.write(Sequence_Key)\n First_Line = False\n Alignment_Length += len(All_Sequences_In_Alignment[Sequence_Key])\n else:\n Weighted_Alignment.write(\"\\n\" + Sequence_Key)\n Weighted_Sequence = []\n for index, character in 
enumerate(All_Sequences_In_Alignment[Sequence_Key]):\n count = 0\n while count < seq_score[index]:\n Weighted_Sequence.append(character)\n count += 1\n else:\n Weighted_Sequence = \"\".join(Weighted_Sequence)\n Weighted_Alignment.write(Weighted_Sequence)\n return Alignment_Length\n\ndef Create_Trimmed_Alignments(Alignment_Dir, Accepted_Proteins, TrimAl, Polymorphic, Output):\n \"\"\"Generates trimmed alignments\"\"\"\n for Alignment_File in Accepted_Proteins:\n try:\n subprocess.call([\n TrimAl,\n \"-in\",\n os.path.join(Alignment_Dir, Alignment_File),\n \"-out\",\n os.path.join(Alignment_Dir, \"Trimmed_\" + Alignment_File),\n \"-automated1\"\n ])\n except subprocess.CalledProcessError as call_err:\n Output.error(\"\\nTrimAl Error.\\n\")\n Output.error(\"\\n\" + \"Command: \" + str(call_err.cmd) + \"\\nError: \" + str(call_err.output))\n sys.exit()\n except OSError as os_err:\n Output.error(\"\\nTrimAl Error.\\n\")\n Output.error(\"\\nError: \" + str(os_err.strerror))\n sys.exit()\n if Polymorphic == True:\n Align_In = Parse_Fasta(os.path.join(Alignment_Dir, \"Trimmed_\" + Alignment_File))\n Align_In_Keys = Align_In.keys()\n Invariants = []\n for index in range(len(Align_In[Align_In_Keys[0]])):\n for key in Align_In:\n if Align_In[Align_In_Keys[0]][index] != Align_In[key][index]:\n break\n else:\n Invariants.append(index)\n Invariants.reverse()\n for index in Invariants:\n for key in Align_In:\n Align_In[key] = Align_In[key][:index] + Align_In[key][(index + 1):]\n with open(os.path.join(Alignment_Dir, \"Trimmed_\" + Alignment_File), \"w\") as Align_Out:\n for key in Align_In:\n Align_Out.write(key + \"\\n\" + Align_In[key] + \"\\n\")\n\ndef Insert_Filler_Sequences(Accepted_Proteins, Alignment_Dir, Genome_Dictionary):\n \"\"\"Inserts blank sequences from organisms missing in an alignment\"\"\"\n for alignment in Accepted_Proteins:\n if os.path.exists(os.path.join(Alignment_Dir, \"Weighted_\" + alignment)):\n Alignment_File = \"Weighted_\" + alignment\n else:\n Alignment_File = \"Trimmed_\" + alignment\n align_length = 0\n align_string = []\n desc_found = False\n for Genome_ID in Genome_Dictionary:\n key_found = False\n with open(os.path.join(Alignment_Dir, Alignment_File), \"a+\") as Align:\n Align.seek(0, 0)\n for line in Align:\n if not desc_found and align_length == 0 and line.startswith(\">\"):\n desc_found = True\n elif desc_found and not line.startswith(\">\"):\n align_string.append(line.strip())\n elif desc_found and align_length == 0 and line.startswith(\">\"):\n align_string = \"\".join(align_string)\n align_length = len(align_string)\n desc_found = False\n if line.startswith(\">\" + Genome_ID):\n key_found = True\n else:\n if not key_found:\n filler = \"\".join([\">\", Genome_ID, \"\\n\", \"-\" * align_length, \"\\n\"])\n Align.write(filler)\n\n\ndef Concatenate_Alignments(Accepted_Proteins, Alignment_Dir, Concatenated_Dir, Genome_Dictionary, GLIMPSe_Output_Dir, Output):\n \"\"\"Concatenates all trimmed/weighted alignments\"\"\"\n Insert_Filler_Sequences(Accepted_Proteins, Alignment_Dir, Genome_Dictionary)\n Alignment_Length = 0\n Concatenated_Sequence = {}\n Genome_IDs = Genome_Dictionary.keys()\n Current_ID = \"\"\n for alignment in Accepted_Proteins:\n if os.path.exists(os.path.join(Alignment_Dir, \"Weighted_\" + alignment)):\n Alignment_File = \"Weighted_\" + alignment\n else:\n Alignment_File = \"Trimmed_\" + alignment\n with open(os.path.join(Alignment_Dir, Alignment_File), \"r\") as align:\n for line in align:\n if line[1:5] in Genome_IDs:\n Current_ID = line[1:5]\n if 
Current_ID not in Concatenated_Sequence:\n Concatenated_Sequence[Current_ID] = []\n elif not line.startswith(\">\"):\n Concatenated_Sequence[Current_ID].append(line.rstrip())\n for Genome_ID in Genome_IDs:\n Concatenated_Sequence[Genome_ID] = \"\".join(Concatenated_Sequence[Genome_ID])\n if re.search(\"[a-zA-Z]\", Concatenated_Sequence[Genome_ID]) is None:\n Output.write(\"Homologs for \" + Genome_Dictionary[Genome_ID] + \" were not identified for any protein. \" +\n Genome_Dictionary[Genome_ID] + \" will be excluded from further analysis.\\n\")\n else:\n with open(os.path.join(Concatenated_Dir, \"Concatenated_Alignment.fasta\"), \"a\") as AlignOut:\n AlignOut.write(\">\" + Genome_Dictionary[Genome_ID] + \"\\n\" + Concatenated_Sequence[Genome_ID] + \"\\n\")\n if Alignment_Length == 0:\n Alignment_Length = len(Concatenated_Sequence[Genome_ID])\n shutil.copy(os.path.join(Concatenated_Dir, \"Concatenated_Alignment.fasta\"),\n os.path.join(GLIMPSe_Output_Dir, \"Concatenated_Alignment.fasta\"))\n return Alignment_Length\n\n\n# Phylogenetic tree construction is handled by the following functions\n# TODO Add option to build individual phylogenetic tree for each protein\ndef Run_FastTree(Tree_Dir, Concatenated_Dir, FastTree, Log_Dir, Output):\n \"\"\"Builds a phylogenetic tree using FastTree\"\"\"\n Input_Alignment = os.path.join(Concatenated_Dir, \"Concatenated_Alignment.fasta\")\n Output_Tree = os.path.join(Tree_Dir, \"FastTree.nwk\")\n FastTree_Output = \"\"\n cmd = [FastTree,\n \"-spr\",\n \"6\",\n \"-mlacc\",\n \"3\",\n \"-slownni\",\n \"-slow\",\n \"-nosupport\",\n \"-out\",\n Output_Tree,\n Input_Alignment\n ]\n try:\n FastTree_Output = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE).communicate()[0]\n if os.path.getsize(Output_Tree) == 0:\n raise subprocess.CalledProcessError(1, str(cmd), str(FastTree_Output))\n else:\n with open(os.path.join(Log_Dir, \"FastTree.txt\"), \"a\") as FastTree_Log:\n FastTree_Log.write(FastTree_Output)\n except (subprocess.CalledProcessError, OSError):\n with open(os.path.join(Log_Dir, \"FastTree.txt\"), \"a\") as FastTree_Log:\n FastTree_Log.write(FastTree_Output)\n Output.error(\"\\nFastTree Error. Check Logs.\\n\")\n sys.exit()\n\n\ndef Run_FastTree_Full(Tree_Dir, Concatenated_Dir, FastTree, Log_Dir, Output, GLIMPSe_Output_Dir):\n \"\"\"Builds a phylogenetic tree using FastTree and generates statistical branch support\"\"\"\n Input_Alignment = os.path.join(Concatenated_Dir, \"Concatenated_Alignment.fasta\")\n Output_Tree = os.path.join(Tree_Dir, \"FastTree.nwk\")\n FastTree_Output = \"\"\n cmd = [FastTree,\n \"-spr\",\n \"6\",\n \"-mlacc\",\n \"3\",\n \"-slownni\",\n \"-slow\",\n \"-out\",\n Output_Tree,\n Input_Alignment\n ]\n try:\n FastTree_Output = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE).communicate()[0]\n if os.path.getsize(Output_Tree) == 0:\n raise subprocess.CalledProcessError(1, str(cmd), str(FastTree_Output))\n else:\n with open(os.path.join(Log_Dir, \"FastTree.txt\"), \"a\") as FastTree_Log:\n FastTree_Log.write(FastTree_Output)\n except (subprocess.CalledProcessError, OSError):\n with open(os.path.join(Log_Dir, \"FastTree.txt\"), \"a\") as FastTree_Log:\n FastTree_Log.write(FastTree_Output)\n Output.error(\"\\nFastTree Error. 
Check Logs.\\n\")\n sys.exit()\n try:\n import dendropy\n except ImportError:\n dendropy = None\n shutil.copy(os.path.join(Tree_Dir, \"FastTree.nwk\"),\n os.path.join(GLIMPSe_Output_Dir, \"Final_Tree.nwk\"))\n if dendropy:\n with open(os.path.join(Tree_Dir, \"FastTree.nwk\"), \"r\") as In_Tree:\n Tree = dendropy.Tree.get_from_string(In_Tree.readline(), \"newick\")\n Tree.ladderize(ascending=False)\n with open(os.path.join(GLIMPSe_Output_Dir, \"Final_Tree.nwk\"), \"w\") as Out_Tree:\n Out_Tree.write(Tree.as_string(schema='newick'))\n\n\ndef Run_RAxML(Tree_Dir, Concatenated_Dir, RAxML, Threads, Log_Dir, GLIMPSe_Output_Dir, Output):\n \"\"\"Optimizes FastTree phylogeny using RAxML\"\"\"\n Input_Alignment = os.path.join(Concatenated_Dir, \"Concatenated_Alignment.fasta\")\n Input_FastTree = os.path.join(Tree_Dir, \"FastTree.nwk\")\n Input_RAxML = os.path.join(Tree_Dir, \"RAxML_result.ML\")\n RAxML_Output = \"\"\n cmd = [RAxML,\n \"-f\",\n \"d\",\n \"-F\",\n \"-T\",\n str(Threads),\n \"-m\",\n \"PROTCATLG\",\n \"-n\",\n \"ML\",\n \"-p\",\n str(random.randrange(1, 100000)),\n \"-t\",\n Input_FastTree,\n \"-s\",\n Input_Alignment,\n \"-w\",\n Tree_Dir\n ]\n try:\n RAxML_Output = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE).communicate()[0]\n count = 0\n while not os.path.exists(Input_RAxML) and count < 15:\n count += 1\n time.sleep(1)\n if not os.path.exists(Input_RAxML):\n raise subprocess.CalledProcessError(1, str(cmd), str(RAxML_Output))\n except (subprocess.CalledProcessError, OSError):\n with open(os.path.join(Log_Dir, \"RAxML.txt\"), \"w\") as RAxML_Log:\n RAxML_Log.write(RAxML_Output)\n Output.error(\"\\nRAxML Error. Check Logs.\\n\")\n sys.exit()\n else:\n cmd = [RAxML,\n \"-f\",\n \"J\",\n \"-T\",\n str(Threads),\n \"-m\",\n \"PROTCATLG\",\n \"-n\",\n \"SH\",\n \"-p\",\n str(random.randrange(1, 100000)),\n \"-t\",\n Input_RAxML,\n \"-s\",\n Input_Alignment,\n \"-w\",\n Tree_Dir\n ]\n SH_Output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n with open(os.path.join(Log_Dir, \"RAxML.txt\"), \"w\") as RAxML_Log:\n RAxML_Log.write(RAxML_Output)\n RAxML_Log.write(SH_Output)\n try:\n import dendropy\n except ImportError:\n dendropy = None\n shutil.copy(os.path.join(Tree_Dir, \"RAxML_fastTreeSH_Support.SH\"),\n os.path.join(GLIMPSe_Output_Dir, \"Final_Tree.nwk\"))\n if dendropy:\n with open(os.path.join(Tree_Dir, \"RAxML_fastTreeSH_Support.SH\"), \"r\") as In_Tree:\n Tree = dendropy.Tree.get_from_string(In_Tree.readline(), \"newick\")\n Tree.ladderize(ascending=False)\n with open(os.path.join(GLIMPSe_Output_Dir, \"Final_Tree.nwk\"), \"w\") as Out_Tree:\n Out_Tree.write(Tree.as_string(schema='newick'))\n\n\ndef GenomeID2GenomeName(Protein_Dir, Alignment_Dir, Genome_Dictionary):\n \"\"\"Renames Sequences in Alignment Files\"\"\"\n for Protein_File in os.listdir(Protein_Dir):\n with open(os.path.join(Protein_Dir, Protein_File), \"r\") as ProtIn:\n Prot_Temp = ProtIn.readlines()\n for key in Genome_Dictionary:\n for item in Prot_Temp:\n if key in item[1:5]:\n Prot_Temp[Prot_Temp.index(item)] = \">\" + Genome_Dictionary[key] + \"\\n\"\n with open(os.path.join(Protein_Dir, Protein_File), \"w\") as ProtOut:\n ProtOut.writelines(Prot_Temp)\n for Alignment_File in os.listdir(Alignment_Dir):\n with open(os.path.join(Alignment_Dir, Alignment_File), \"r\") as AlignIn:\n Align_Temp = AlignIn.readlines()\n for key in Genome_Dictionary:\n for item in Align_Temp:\n if key in item[1:5]:\n Align_Temp[Align_Temp.index(item)] = \">\" + Genome_Dictionary[key] + \"\\n\"\n with 
open(os.path.join(Alignment_Dir, Alignment_File), \"w\") as AlignOut:\n            AlignOut.writelines(Align_Temp)\n\n\ndef GLIMPSe_log(Alignment_Dir, Protein_Distribution, Alignment_Length, HMMer_Time, ClustalOmega_Time, FastTree_Time,\n                RAxML_Time, Pipeline_Time, Alignment_Filtering, Log_Dir):\n    \"\"\"Generates log for pipeline\"\"\"\n    with open(os.path.join(Log_Dir, \"Pipeline Log.txt\"), \"w\") as log:\n        Accepted_Protein_Number = 0\n        if Alignment_Filtering == \"Trim\":\n            for alignment in os.listdir(Alignment_Dir):\n                if alignment.startswith(\"Trimmed_\"):\n                    Accepted_Protein_Number += 1\n            log.write(\"Number of accepted protein families in concatenated alignment = \" + str(Accepted_Protein_Number))\n            log.write(\n                \"\\nMinimum proportion of organisms (organisms identified/total number of organisms) in accepted protein families = \" + str(\n                    Protein_Distribution * 100) + \"%\")\n            log.write(\"\\nLength of trimmed concatenated alignment = \" + str(Alignment_Length))\n            log.write(\"\\nThe following proteins were used in the concatenated alignment:\")\n            for alignment in os.listdir(Alignment_Dir):\n                if alignment.startswith(\"Trimmed_\"):\n                    log.write(\"\\n\" + os.path.splitext(alignment)[0][8:])\n        elif Alignment_Filtering == \"Weight\":\n            for alignment in os.listdir(Alignment_Dir):\n                if alignment.startswith(\"Weighted_\"):\n                    Accepted_Protein_Number += 1\n            log.write(\"Number of accepted protein families in concatenated alignment = \" + str(Accepted_Protein_Number))\n            log.write(\n                \"\\nMinimum proportion of organisms (organisms identified/total number of organisms) in accepted protein families = \" + str(\n                    Protein_Distribution * 100) + \"%\")\n            log.write(\"\\nLength of unweighted concatenated alignment = \" + str(Alignment_Length))\n            log.write(\"\\nThe following proteins were used in the concatenated alignment:\")\n            for alignment in os.listdir(Alignment_Dir):\n                if alignment.startswith(\"Weighted_\"):\n                    log.write(\"\\n\" + os.path.splitext(alignment)[0][9:])\n        log.write(\"\\nProtein Family Identification Duration = \" + str(round(HMMer_Time, 2)) + \" seconds\")\n        log.write(\"\\nDuration of Final ClustalOmega Alignments = \" + str(round(ClustalOmega_Time, 2)) + \" seconds\")\n        log.write(\"\\nOperational time of FastTree = \" + str(round(FastTree_Time, 2)) + \" seconds\")\n        log.write(\"\\nOperational time of RAxML = \" + str(round(RAxML_Time, 2)) + \" seconds\")\n        log.write(\"\\nTotal operational time of pipeline = \" + str(round(Pipeline_Time, 2)) + \" seconds\")\n\n\ndef Core_Pipeline(Input_Directory, Target_Proteins, Protein_Distribution, Alignment_Filtering, PAMatrix, POCP, AAI,\n                  Single_Copy, Marker_Proteins, Fast_Cluster, Fast_Phylogeny, No_Tree, Polymorphic, Genome_Dir, Protein_Dir,\n                  Alignment_Dir, Concatenated_Dir, Tree_Dir, Log_Dir, GLIMPSe_Output_Dir, Marker_Dir, CDHIT, JACKHMMER,\n                  HMMBUILD, HMMSEARCH, ClustalOmega, TrimAl, FastTree, RAxML, Threads, stdout_messenger,\n                  stderr_messenger):\n    \"\"\"Main pipeline\"\"\"\n    Output = GLIMPS_Writer(stdout_messenger, stderr_messenger)\n    Pipeline_Start = time.time()\n    Output.write(\":::PIPELINE PREPARATION:::\\n\")\n    if Marker_Proteins != \"\":\n        if not os.path.exists(os.path.join(Marker_Dir, Marker_Proteins + \".hmm\")):\n            Output.write(\"Extracting PhyEco Marker proteins...\\n\")\n            try:\n                with tarfile.open(os.path.join(Marker_Dir, \"PhyEco Marker Protein Families.tar.bz2\"), \"r:bz2\") as PhyEco:\n                    def is_within_directory(directory, target):\n                        \n                        abs_directory = os.path.abspath(directory)\n                        abs_target = os.path.abspath(target)\n                        \n                        prefix = 
os.path.commonprefix([abs_directory, abs_target])\n                        \n                        return prefix == abs_directory\n                    \n                    def safe_extract(tar, path=\".\", members=None, *, numeric_owner=False):\n                        \n                        for member in tar.getmembers():\n                            member_path = os.path.join(path, member.name)\n                            if not is_within_directory(path, member_path):\n                                raise Exception(\"Attempted Path Traversal in Tar File\")\n                        \n                        tar.extractall(path, members, numeric_owner=numeric_owner) \n                        \n                    \n                    safe_extract(PhyEco, Marker_Dir)\n                Output.write(\"PhyEco Marker proteins extracted.\\n\")\n            except IOError:\n                Output.error(\"Unable to detect or extract all PhyEco Markers.\\n\")\n                sys.exit()\n    Output.write(\"Initial processing of input files...\\n\")\n    Genome_Dictionary = Process_Genome_Files(Input_Directory, Genome_Dir, Log_Dir, Output)\n    Output.write(\"Initial processing of input files complete.\\n\")\n\n    Output.write(\"\\n:::PROTEIN FAMILY IDENTIFICATION:::\\n\")\n    if os.path.exists(Target_Proteins):\n        Output.write(\"Identifying protein families...\\n\")\n        HMMer_Start = time.time()\n        Protein_Clusters = Find_Proteins(Genome_Dir, Protein_Dir, GLIMPSe_Output_Dir, Genome_Dictionary, Target_Proteins, JACKHMMER, PAMatrix, Log_Dir, Output)\n        HMMer_Time = time.time() - HMMer_Start\n        Accepted_Proteins = Determine_Protein_Distribution(Protein_Distribution, Protein_Clusters, Genome_Dictionary, Output)\n        Output.write(\"All protein families identified.\\n\")\n    elif Marker_Proteins != \"\":\n        Output.write(\"Identifying protein families...\\n\")\n        HMMer_Start = time.time()\n        Protein_Clusters = Find_Marker_Proteins(Genome_Dir, Protein_Dir, GLIMPSe_Output_Dir, Genome_Dictionary, HMMSEARCH, PAMatrix, Log_Dir, Marker_Dir, Marker_Proteins, Output)\n        HMMer_Time = time.time() - HMMer_Start\n        Accepted_Proteins = Determine_Protein_Distribution(Protein_Distribution, Protein_Clusters, Genome_Dictionary, Output)\n        Output.write(\"All protein families identified.\\n\")\n    else:\n        Output.write(\"Identifying core protein families...\\n\")\n        HMMer_Start = time.time()\n        HMM_Clusters, Concat_Gen_Dict = ClusterCoreProts(Genome_Dir, Protein_Dir, Alignment_Dir, CDHIT, ClustalOmega, HMMBUILD, HMMSEARCH, Log_Dir, Threads, Output, Fast_Cluster)\n        HMMer_Time = time.time() - HMMer_Start\n        Filtered_Clusters = Protein_Family_Filter(Protein_Dir, Alignment_Dir, Concat_Gen_Dict, HMM_Clusters, GLIMPSe_Output_Dir, Genome_Dictionary, PAMatrix, POCP, Single_Copy, Output)\n        Accepted_Proteins = Determine_Protein_Distribution(Protein_Distribution, Filtered_Clusters, Genome_Dictionary, Output)\n        Output.write(\"All protein families identified.\\n\")\n\n    Output.write(\"\\n:::PROTEIN FAMILY ALIGNMENT AND TRIMMING:::\\n\")\n    ClustalOmega_Start = time.time()\n    if AAI:\n        Output.write(\"Performing ClustalOmega alignments on all putative protein families...\\n\")\n        ParallelAlignment(Protein_Dir, Alignment_Dir, ClustalOmega, Log_Dir, Threads, False, [], Output)\n        ClustalOmega_Time = time.time() - ClustalOmega_Start\n        Output.write(\"All ClustalOmega alignments complete.\\n\")\n        Output.write(\"Producing Average Amino Acid Identity Matrix...\\n\")\n        Calculate_AAI(Alignment_Dir, Genome_Dictionary, Log_Dir, GLIMPSe_Output_Dir, Threads)\n        Output.write(\"Average Amino Acid Identity Matrix Produced.\\n\")\n    elif not AAI:\n        Output.write(\"Performing ClustalOmega alignments on protein families which meet selected criteria for core genome...\\n\")\n        ParallelAlignment(Protein_Dir, Alignment_Dir, ClustalOmega, Log_Dir, Threads, False, Accepted_Proteins, Output)\n        ClustalOmega_Time = time.time() - ClustalOmega_Start\n        Output.write(\"All 
ClustalOmega alignments complete.\\n\")\n    else:\n        Output.error(\"\\nGLIMPSe pipeline error. Check Logs.\\n\")\n        sys.exit()\n    Alignment_Length = 0\n    if Alignment_Filtering == \"Trim\":\n        Output.write(\"Trimming alignments...\\n\")\n        Create_Trimmed_Alignments(Alignment_Dir, Accepted_Proteins, TrimAl, Polymorphic, Output)\n        Output.write(\"All alignments trimmed.\\n\")\n    elif Alignment_Filtering == \"Weight\":\n        Output.write(\"Creating weighted alignments...\\n\")\n        Alignment_Length = Create_Weighted_Alignments(Alignment_Dir, Accepted_Proteins, TrimAl, Output)\n        Output.write(\"Weighted alignments created.\\n\")\n    Output.write(\"Concatenating alignments...\\n\")\n    if Alignment_Filtering == \"Trim\":\n        Alignment_Length = Concatenate_Alignments(Accepted_Proteins, Alignment_Dir, Concatenated_Dir, Genome_Dictionary, GLIMPSe_Output_Dir, Output)\n    elif Alignment_Filtering == \"Weight\":\n        Concatenate_Alignments(Accepted_Proteins, Alignment_Dir, Concatenated_Dir, Genome_Dictionary, GLIMPSe_Output_Dir, Output)\n    else:\n        Output.error(\"\\nGLIMPSe pipeline error. Check Logs.\\n\")\n        sys.exit()\n    Output.write(\"Alignments concatenated.\\n\")\n\n    if No_Tree:\n        FastTree_Time = 0\n        RAxML_Time = 0\n    else:\n        Output.write(\"\\n:::PHYLOGENETIC TREE CONSTRUCTION:::\\n\")\n        if Fast_Phylogeny:\n            Output.write(\"Building tree using FastTree...\\n\")\n            FastTree_Start = time.time()\n            Run_FastTree_Full(Tree_Dir, Concatenated_Dir, FastTree, Log_Dir, Output, GLIMPSe_Output_Dir)\n            FastTree_Time = time.time() - FastTree_Start\n            RAxML_Time = 0\n            Output.write(\"Phylogenetic tree completed.\\n\")\n        else:\n            Output.write(\"Building initial tree using FastTree...\\n\")\n            FastTree_Start = time.time()\n            Run_FastTree(Tree_Dir, Concatenated_Dir, FastTree, Log_Dir, Output)\n            FastTree_Time = time.time() - FastTree_Start\n            Output.write(\"Using RAxML to optimize tree...\\n\")\n            RAxML_Start = time.time()\n            Run_RAxML(Tree_Dir, Concatenated_Dir, RAxML, Threads, Log_Dir, GLIMPSe_Output_Dir, Output)\n            RAxML_Time = time.time() - RAxML_Start\n            Output.write(\"Final phylogenetic tree completed.\\n\")\n\n    Output.write(\"\\nWriting output files...\\n\")\n    GenomeID2GenomeName(Protein_Dir, Alignment_Dir, Genome_Dictionary)\n    Pipeline_Time = time.time() - Pipeline_Start\n    GLIMPSe_log(Alignment_Dir, Protein_Distribution, Alignment_Length, HMMer_Time, ClustalOmega_Time, FastTree_Time, RAxML_Time, Pipeline_Time, Alignment_Filtering, Log_Dir)\n    Output.write(\"\\nPipeline complete in %s seconds\\nOutput files can be located in %s \\n\" % (str(round(Pipeline_Time, 2)), GLIMPSe_Output_Dir))\n\n\ndef main():\n    args = check_arguments()\n    stdout_messenger = multiprocessing.Manager().Queue()\n    stderr_messenger = multiprocessing.Manager().Queue()\n    Genome_Dir, Protein_Dir, Alignment_Dir, Concatenated_Dir, Tree_Dir, Log_Dir, GLIMPSe_Output_Dir, Dependency_Dir, Marker_Dir = Build_Output_Dirs(\n        args.Output_Directory, stdout_messenger, stderr_messenger)\n    CDHIT, JACKHMMER, HMMBUILD, HMMSEARCH, ClustalOmega, TrimAl, FastTree, RAxML, Threads = Prepare_Dependencies(Dependency_Dir, stdout_messenger, stderr_messenger)\n    Core_Pipeline(args.Input_Directory, args.Target_Proteins, args.Protein_Distribution, args.Alignment_Filtering,\n                  args.PAMatrix, args.POCP, args.AAI, args.Single_Copy, args.Marker_Proteins, args.Fast_Cluster,\n                  args.Fast_Phylogeny, args.No_Tree, args.Polymorphic, Genome_Dir, Protein_Dir, Alignment_Dir, Concatenated_Dir, Tree_Dir,\n                  Log_Dir, GLIMPSe_Output_Dir, Marker_Dir, CDHIT, JACKHMMER, HMMBUILD, HMMSEARCH, ClustalOmega, TrimAl,\n                  FastTree, RAxML, 
Threads, stdout_messenger, stderr_messenger)\n\n\nif __name__ == '__main__':\n    multiprocessing.freeze_support()\n    main()\n","repo_name":"Mobolaji-Adeolu/GLIMPS","sub_path":"GLIMPS_Pipeline.py","file_name":"GLIMPS_Pipeline.py","file_ext":"py","file_size_in_byte":85195,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"4"} +{"seq_id":"9120160772","text":"import exercise20200217_2 as m1\nfrom exercise20200217_2 import foo\nfrom random import randint\n\n\ndef roll_dice(n=2):\n    \"\"\"Roll the dice\n    :param n: number of dice\n    :return: sum of the points of the n dice\n    \"\"\"\n    total = 0\n    for _ in range(n):\n        total += randint(1, 6)\n    return total\n\n\ndef add(a=0, b=0, c=0):\n    return a+b+c\n\n\n# If no argument is given, use the default and roll two dice\nprint(roll_dice())\n# Roll three dice\nprint(roll_dice(3))\nprint(add())\nprint(add(1))\nprint(add(1, 2))\nprint(add(1, 2, 3))\n# Keyword arguments can be passed in any order\nprint(add(c=50, a=100, b=200))\n\n\n\"\"\"def foo():\n    print('hello,world!')\n\n\ndef foo():\n    print('goodbye,world!')\n\n\nfoo()\"\"\"\n\nfoo()\nm1.foo()\n\n\ndef is_prime(num):\n    for factor in range(2, num):\n        if num % factor == 0:\n            return False\n    return True if num != 1 else False\n","repo_name":"chengwjn/MyDailyProgram","sub_path":"曾经的学习记录/PythonStudy/20200217/20200217exercise4.py","file_name":"20200217exercise4.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"22246752835","text":"from connection import connection\n\n\ndef insert_user(user):\n    \n    sql = \"\"\"INSERT INTO users(user_name,profile_url) VALUES(%s,%s) returning user_id\"\"\"\n    conn = None\n    try:\n        conn = connection()\n        cur = conn.cursor()\n        url= \"https://www.instagram.com/\"\n        cur.execute(sql, (user.user_name.upper() ,url+user.user_name,))\n        user_id = cur.fetchone()\n        conn.commit()\n        cur.close()\n        return user_id[0]\n    except (Exception) as error:\n        print(error)\n    finally:\n        if conn is not None:\n            conn.close()\n\ndef user_exists(user):\n    conn = connection()\n    cur = conn.cursor()\n    sql = \"\"\"select user_id,user_name from public.users where user_name=%s\"\"\"\n    cur.execute(sql,(user.user_name.upper() ,))\n    user_db = cur.fetchone()\n    cur.close()\n    conn.close()\n    if (user_db is None ):\n        return False\n    else:\n        return True\n\ndef get_all_users():\n    conn = connection()\n    cur = conn.cursor()\n    sql = \"\"\"select user_id,user_name,profile_url from public.users\"\"\"\n    cur.execute(sql,)\n    users = cur.fetchall()\n    cur.close()\n    conn.close()\n    return users\n\ndef get_user(user):\n    conn = connection()\n    cur = conn.cursor()\n    sql = \"\"\"select user_id from public.users where user_name=%s\"\"\"\n    cur.execute(sql,(user.user_name.upper() ,))\n    user_id = cur.fetchone()\n    cur.close()\n    conn.close()\n    return user_id[0]","repo_name":"drojas159/web_scrapping_ig","sub_path":"usuarioDAO.py","file_name":"usuarioDAO.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"25188068859","text":"# -*- coding: utf-8 -*-\r\n# @Time : 2018/8/25 19:57\r\n# @Author : Jeffrey\r\nfrom threading import Thread\r\n\r\nfrom spider.xici import XiCiSpider\r\n\r\nIP_PATH = 'proxy_pool/ip.txt'\r\n\r\n\r\nclass IpPool:\r\n    def __init__(self, start, end):\r\n        self.start = start\r\n        self.end = end\r\n\r\n    def get_single_ip_pool(self, page):\r\n        spider = XiCiSpider(page)\r\n        spider.get_html()\r\n        tr = spider.parse_html()\r\n        spider.download_data(tr)\r\n        proxy = spider.addrs\r\n        if len(proxy) > 0:\r\n            for i in 
range(len(proxy)):\r\n                self.save_ip_pool(proxy[i])\r\n\r\n    def get_proxy_pool(self):\r\n        for page in range(self.start, self.end):\r\n            t = Thread(target=self.get_single_ip_pool, args=(page,))\r\n            t.start()\r\n\r\n    @staticmethod\r\n    def save_ip_pool(proxy):\r\n        f = open(IP_PATH, 'a')\r\n        f.writelines(str(proxy) + '\\n')\r\n        f.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n    f = open(IP_PATH, 'r+')\r\n    f.truncate()  # clear the file\r\n    f.close()\r\n    pool = IpPool(1, 10)\r\n    pool.get_proxy_pool()\r\n","repo_name":"YYJeffrey/simple-proxy","sub_path":"pool_ip.py","file_name":"pool_ip.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"24730252384","text":"import requests\nimport json\n\ndef getMovieData(title):\n    key='2d3ceeff'\n    url='http://www.omdbapi.com/'\n    params = {'apikey':key,'t':title} \n    r = requests.get(url=url, params=params)\n    \n    data=[\n        r.json()['Plot'],\n        r.json()['Year'],\n        r.json()['Language'],\n        r.json()['Type'],\n        r.json()['Rated'],\n        r.json()['Genre'],\n        r.json()['Awards'],\n        r.json()['imdbRating']\n    ]\n    return data\n\ndef satisfyQuery(title):\n    newsData = getMovieData(title)\n    filmData = {\n        'genre':newsData[5],\n        'releaseYear':newsData[1],\n        'plot':newsData[0],\n        'imdbRating':newsData[7],\n        'rated':newsData[4],\n        'language':newsData[2],\n        'type':newsData[3],\n        'awards':newsData[6]\n    }\n    return filmData","repo_name":"nimotli/SmartAssistantArab","sub_path":"states/Api/movieApi.py","file_name":"movieApi.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"4"} +{"seq_id":"16517050141","text":"\n\ndef valid_index(index,text):\n    if 0 <= index and index < len(text):\n        return True\n    return False\n\n\ndef add(index, word, text):\n\n    if valid_index(index,text):\n        text = text[0:index] + word + text[index::]\n        print(text)\n        return text\n    print(text)\n    return text\n\n\ndef remove(start_index, end_index, text):\n    if valid_index(start_index,text) and valid_index(end_index,text):\n        text = text[:start_index] + text[end_index+1:]\n        print(text)\n        return text\n    print(text)\n    return text\n\ndef switch(old_string, new_string, text):\n    if old_string in text:\n        text = text.replace(old_string,new_string)\n        print(text)\n        return text\n    print(text)\n    return text\n\ntext = input()\n\n\nwhile True:\n    command = input()\n\n    if command == 'Travel':\n        break\n\n    command = command.split(':')\n\n    if 'Add Stop' in command:\n\n        index = int(command[1])\n        word = command[2]\n\n        text = add(index,word,text)\n\n    elif 'Remove Stop' in command:\n\n        start_index = int(command[1])\n        end_index = int(command[2])\n\n        text = remove(start_index,end_index,text)\n\n    elif 'Switch' in command:\n\n        old_string = command[1]\n        new_string = command[2]\n\n        text = switch(old_string,new_string,text)\n\n\nprint(f'Ready for world tour! 
Planned stops: {text}')","repo_name":"milensski/SoftUni_Fundamentals","sub_path":"Final Exam Preperation/Final_Prep_Exam 2/word_tour.py","file_name":"word_tour.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"33821903469","text":"from tkinter import *\n\n\nroot = Tk()\nroot.title(\"Events Handling\")\nroot.geometry(\"340x388\")\n\n\ndef coordinates(event):\n    print(f\"Button is at {event.x},{event.y}\")\n\nwidget = Button(root, text=\"Click\")\nwidget.pack()\n\n\"\"\"The left mouse button is defined by the event <Button-1>, the middle button by <Button-2>, and the rightmost mouse button by <Button-3>.\n   <Button-4> defines the scroll up event on mice with wheel support and <Button-5> the scroll down.\n   You can use ButtonPress instead of Button, or even leave it out completely: <Button-1>, <ButtonPress-1>, and <1> are all synonyms.\"\"\"\nwidget.bind(\"<Button-1>\", coordinates)  # On a single click the coordinates function is executed\n\n\"\"\"Similar to the Button event, see above, but the button is double clicked instead of a single click. To specify the left, middle or right \n   mouse button use <Double-Button-1>, <Double-Button-2>, and <Double-Button-3> respectively.\n   You can use Double or Triple as prefixes. Note that if you bind to both a single click (<Button-1>) and a double click \n   (<Double-Button-1>), both bindings will be called.\"\"\"\nwidget.bind(\"<Double-Button-1>\", quit)  # On a double click the application quits\n\n\"\"\"The mouse is moved with a mouse button being held down. To specify the left, middle or right mouse button use <B1-Motion>, <B2-Motion> and\n   <B3-Motion> respectively. The current position of the mouse pointer is provided in the x and y members of the event object passed to the \n   callback, i.e. event.x, event.y\"\"\"\nwidget.bind(\"<B1-Motion>\", coordinates)\n\n\nroot.mainloop()","repo_name":"Vikramkumarcoder/Tkinter_course","sub_path":"Basics/Events.py","file_name":"Events.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"28431765015","text":"#-*- coding: utf-8 -*-\n\nfrom flask import Flask, render_template, request, redirect\nfrom pymongo import MongoClient\nimport re\n\napp = Flask(__name__)\n\napp.debug = True\n\nclient = MongoClient('localhost', 27017)\ndb = client['searchengine']\n\n@app.route(\"/\")\ndef index():\n    sites = db['sites'].find()\n    return render_template('index.html', sites=sites)\n\n@app.route(\"/newsite\", methods=[\"POST\"])\ndef newsite():\n    data = {\n        'url': request.form['site']\n    }\n\n    if db['sites'].insert(data):\n        return redirect('/')\n\n    return redirect('/')\n\n@app.route(\"/search\")\ndef search():\n    term = request.args['search']\n    cursor = db['infos'].find({\"title\": {\"$regex\": \"[((?!\" + term.replace(\" \", \"\") + \").)*$]\"}})\n    \n    results = []\n    for x in cursor:\n        results.append(x)\n\n    return render_template('search.html', results=results)\n\nif __name__ == \"__main__\":\n    app.run()","repo_name":"reginaldojunior/searchengine","sub_path":"frontend/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"29249637995","text":"#!/usr/bin/env python3\n\"\"\"\n`rpn` testing\n\n@authors: Roman Yasinovskyy, Karina Hoff\n@version: 2021.9\n\"\"\"\n\nimport pathlib\n\nimport pytest\n\nfrom rpn import StackError, TokenError, do_math, postfix_eval, rpn_calc\n\n\n@pytest.mark.parametrize(\n    \"expression, expected\",\n    [(\"2 3 + =\", 5), (\"2 1 - =\", 1), (\"1 2 - 3 * 3 + =\", 0), (\"1 2 - 1 * =\", -1)],\n)\ndef test_postfix_eval(expression, expected):\n    
\"\"\"Test correct postfix expressions\"\"\"\n assert postfix_eval(expression) == expected\n\n\n@pytest.mark.parametrize(\"filename, expected\", [(\"rpn_1\", 18.61), (\"rpn_2\", 8118)])\ndef test_checksum(filename, expected):\n \"\"\"Test correct postfix expressions\"\"\"\n if not pathlib.Path(f\"{filename}.in.txt\").exists():\n filename = f\"projects/rpn/{filename}\"\n assert pytest.approx(rpn_calc(f\"{filename}.in.txt\"), 0.01) == expected\n\n\n@pytest.mark.parametrize(\n \"expression, err_message\",\n [\n (\"=\", \"Stack is empty\"),\n (\"1 2 =\", \"Stack is not empty\"),\n (\"1 2 3 + =\", \"Stack is not empty\"),\n (\"1 2 + 3 =\", \"Stack is not empty\"),\n ],\n)\ndef test_postfix_eval_stack_error(expression, err_message):\n \"\"\"Test incorrect postfix expressions: Stack Error\"\"\"\n with pytest.raises(StackError) as excinfo:\n postfix_eval(expression)\n exception_message = excinfo.value.args[0]\n assert exception_message == err_message\n\n\n@pytest.mark.parametrize(\n \"expression, err_message, err_token\",\n [(\"a b + =\", \"Unknown token\", \"a\"), (\"1 2 @ =\", \"Unknown token\", \"@\")],\n)\ndef test_postfix_eval_token_error(expression, err_message, err_token):\n \"\"\"Test incorrect postfix expressions: Unknown Token Error\"\"\"\n with pytest.raises(TokenError) as excinfo:\n postfix_eval(expression)\n exception_message = excinfo.value.args[0]\n assert exception_message == f\"{err_message}: {err_token}\"\n\n\n@pytest.mark.parametrize(\n \"operation, operand1, operand2, expected\",\n [(\"+\", 2, 3, 5), (\"-\", 2, 3, -1), (\"*\", 2, 3, 6), (\"/\", 10, 2, 5)],\n)\ndef test_do_math_simple_int_success(operation, operand1, operand2, expected):\n \"\"\"Test simple math expressions\"\"\"\n assert do_math(operation, operand1, operand2) == expected\n\n\n@pytest.mark.parametrize(\n \"operation, operand1, operand2, expected\", [(\"/\", 2, 3, 0.6666), (\"/\", 3, 2, 1.5)]\n)\ndef test_do_math_simple_float_success(operation, operand1, operand2, expected):\n \"\"\"Test simple math expressions\"\"\"\n assert do_math(operation, operand1, operand2) == pytest.approx(expected, 0.001)\n\n\n@pytest.mark.parametrize(\n \"operation, operand1, operand2, err_message\",\n [\n (\"/\", 2, 0, \"division by zero\"),\n (\"%\", 2, 0, \"integer division or modulo by zero\"),\n ],\n)\ndef test_do_math_simple_error(operation, operand1, operand2, err_message):\n \"\"\"Test simple math expressions\"\"\"\n with pytest.raises(ZeroDivisionError) as excinfo:\n do_math(operation, operand1, operand2)\n exception_message = excinfo.value.args[0]\n assert exception_message == f\"{err_message}\"\n\n\n@pytest.mark.parametrize(\"symbol\", [1, \"a\"])\ndef test_do_math_syntax_error(symbol):\n \"\"\"Test incorrect simple math expressions\"\"\"\n with pytest.raises(SyntaxError) as excinfo:\n do_math(symbol, \"/\", 2)\n exception_message = excinfo.value.args[0]\n assert exception_message == f\"invalid syntax\"\n\n\n@pytest.mark.parametrize(\n \"operation, operand1, operand2, expected\",\n [(\"//\", 2, 3, 0), (\"//\", 3, 2, 1), (\"**\", 2, 3, 8), (\"**\", 5, 6, 15625)],\n)\ndef test_do_math_advanced(operation, operand1, operand2, expected):\n \"\"\"Test simple math expressions\"\"\"\n assert do_math(operation, operand1, operand2) == expected\n\n\n@pytest.mark.skip\n@pytest.mark.parametrize(\n \"operation, operand1, operand2, expected\",\n [\n (\"&\", 5, 6, 4),\n (\"&\", 51, 61, 49),\n (\"|\", 5, 6, 7),\n (\"|\", 51, 61, 63),\n (\"^\", 5, 6, 3),\n (\"^\", 51, 61, 14),\n ],\n)\ndef test_do_math_bitwise(operation, operand1, operand2, 
expected):\n \"\"\"Test bitwise expressions\"\"\"\n assert do_math(operation, operand1, operand2) == expected\n\n\n@pytest.mark.parametrize(\n \"operation, operand1, operand2, err_message\",\n [\n (\"//\", 2, 0, \"integer division or modulo by zero\"),\n (\"//\", 0, 0, \"integer division or modulo by zero\"),\n ],\n)\ndef test_do_math_advanced_error(operation, operand1, operand2, err_message):\n \"\"\"Test advanced math errors\"\"\"\n\n with pytest.raises(ZeroDivisionError) as excinfo:\n do_math(operation, operand1, operand2)\n exception_message = excinfo.value.args[0]\n assert exception_message == f\"{err_message}\"\n\n\nif __name__ == \"__main__\":\n pytest.main([\"-v\", __file__])\n","repo_name":"sturekev/ads-class-pub","sub_path":"projects/rpn/rpn_test.py","file_name":"rpn_test.py","file_ext":"py","file_size_in_byte":4626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"4"} +{"seq_id":"39080471553","text":"#!/usr/bin/env python3\r\n'''\r\n@Author: King\r\n@Date: 2022-12-30 12:00:08\r\n@Email: linsy_king@sjtu.edu.cn\r\n@Url: https://yydbxx.cn\r\n'''\r\n\r\nimport requests\r\nimport time\r\nimport threading\r\nfrom rich.progress import Progress, SpinnerColumn, TextColumn\r\n\r\n\r\nclass ElectSingle:\r\n def __init__(self, JSESSIONID, TURNID, COURSEID: list, thread_number=3, max_try=None, course_desc: list = None) -> None:\r\n '''\r\n COURSEID: list of the ElectTurnLessonTaskID of courses\r\n thread_number: thred number for each course\r\n max_try: max try times\r\n course_desc: list of course description\r\n '''\r\n self.ELECTTURNID = TURNID\r\n self.COURSEID = COURSEID\r\n self.course_desc = course_desc\r\n self.trymax = max_try\r\n self.stop = 0\r\n self.reqall = 0\r\n self.Nu = thread_number\r\n self.cookies = {\r\n 'JSESSIONID': JSESSIONID\r\n }\r\n self.headers = {\r\n 'Connection': 'keep-alive',\r\n 'sec-ch-ua': '^\\\\^Chromium^\\\\^;v=^\\\\^92^\\\\^, ^\\\\^',\r\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\r\n 'X-CSRF-TOKEN': 'ab3849d5-4aa0-471c-86eb-adfc0916af73',\r\n 'X-Requested-With': 'XMLHttpRequest',\r\n 'sec-ch-ua-mobile': '?0',\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36',\r\n 'Content-Type': 'application/x-www-form-urlencoded',\r\n 'Origin': 'https://coursesel.umji.sjtu.edu.cn',\r\n 'Sec-Fetch-Site': 'same-origin',\r\n 'Sec-Fetch-Mode': 'cors',\r\n 'Sec-Fetch-Dest': 'empty',\r\n 'Referer': 'https://coursesel.umji.sjtu.edu.cn/welcome.action',\r\n 'Accept-Language': 'zh-CN,zh;q=0.9',\r\n }\r\n # print(\r\n # f\"init: electturnid: {self.ELECTTURNID}, courses: {self.COURSEID}, trymax: {self.trymax}\")\r\n print(f\"using {thread_number} thread(s) for each course\")\r\n\r\n def sendreq(self, data, taskid):\r\n while(1):\r\n if(self.stop):\r\n self.progress.update(\r\n self.tasks[taskid], description=\"[white]\" + self.course_desc[taskid] + \": [red]Maximum try times reached\")\r\n return\r\n ts = time.time()\r\n if self.trymax and self.reqall >= self.trymax:\r\n self.stop = 1\r\n self.progress.update(\r\n self.tasks[taskid], description=\"[white]\" + self.course_desc[taskid] + \": [red]Maximum try times reached\")\r\n return\r\n realtime = int(round(ts * 1000))\r\n params = (\r\n ('_t', realtime),\r\n )\r\n response = requests.post('https://coursesel.umji.sjtu.edu.cn/tpm/doElect_ElectTurn.action',\r\n headers=self.headers, params=params, cookies=self.cookies, data=data)\r\n ret = response.content.decode(encoding='utf-8', 
errors='ignore')\r\n if(len(ret) > 1000):\r\n # print(\"error\")\r\n self.progress.log(\"Got error response. It's very likely that you are using expired JSESSIONID.\")\r\n self.progress.update(\r\n self.tasks[taskid], advance=1, description=\"[white]\" + self.course_desc[taskid] + \": [red]Failed\")\r\n return\r\n if(str(ret).find(\"false\") == -1):\r\n # print(\"success, congrats\")\r\n self.progress.update(\r\n self.tasks[taskid], advance=1, description=\"[white]\" + self.course_desc[taskid] + \": [green]Success\")\r\n return\r\n # self.progress.log(ret.strip())\r\n self.progress.update(self.tasks[taskid], advance=1)\r\n self.reqall += 1\r\n\r\n def run(self):\r\n s = []\r\n self.tasks = []\r\n for id, courseid in enumerate(self.COURSEID):\r\n data = f'jsonString=%7B%22electTurnId%22%3A%22{self.ELECTTURNID}%22%2C%22autoElect%22%3Atrue%2C%22lessonTasks%22%3A%5B%22{courseid}%22%5D%7D'\r\n for _ in range(self.Nu):\r\n s.append(threading.Thread(\r\n target=self.sendreq, args=(data, id,)))\r\n\r\n for i in range(len(s)):\r\n s[i].start()\r\n\r\n # Record run time\r\n start_time = time.time()\r\n\r\n with Progress(\r\n SpinnerColumn(),\r\n TextColumn(\"{task.description} (tried: {task.completed})\"),\r\n transient=False\r\n ) as progress:\r\n self.progress = progress\r\n for i in range(len(self.COURSEID)):\r\n if self.course_desc:\r\n desc = self.course_desc[i]\r\n else:\r\n desc = self.COURSEID[i]\r\n desc = f\"[white]{desc}: Electing...\"\r\n task = progress.add_task(desc, total=None)\r\n self.tasks.append(task)\r\n\r\n for i in range(len(s)):\r\n s[i].join()\r\n\r\n # Print run time\r\n print(f\"All tasks ended in {time.time() - start_time} seconds.\")\r\n","repo_name":"linsyking/ji-coursesel-cli","sub_path":"courseselcli/single.py","file_name":"single.py","file_ext":"py","file_size_in_byte":5082,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"4"} +{"seq_id":"562251353","text":"import os, datetime, collections, json\nfrom aqt import mw\nfrom aqt.qt import QMessageBox\nfrom aqt.utils import showText\nfrom anki.utils import stripHTML\nfrom . import furigana\n\ntry:\n from importlib import reload\nexcept:\n pass #Python 2 has reload built-in\n\ndef clean(word):\n return stripHTML(word).strip()\n\ndef highlightSentences(browserNids):\n from . import core\n reload(core)\n from . 
import config as conf\n reload(conf)\n \n model = mw.col.models.byName(conf.noteType)\n if model is None:\n QMessageBox.warning(mw, conf.progName, \"Can't find note type\")\n return\n fieldNames = [fld[\"name\"] for fld in model[\"flds\"]]\n if conf.wordField1 not in fieldNames:\n QMessageBox.warning(mw, conf.progName, \"Can't find word field 1\")\n return\n if conf.wordField2 is not None and conf.wordField2 not in fieldNames:\n QMessageBox.warning(mw, conf.progName, \"Can't find word field 2\")\n return\n if conf.sentenceField not in fieldNames:\n QMessageBox.warning(mw, conf.progName, \"Can't find sentence field\")\n return\n if conf.targetField is not None and conf.targetField not in fieldNames:\n QMessageBox.warning(mw, conf.progName, \"Can't find target field\")\n return\n \n outcomeCounts = collections.Counter()\n usableNids = mw.col.findNotes(\"mid:\" + str(model[\"id\"]))\n if browserNids is None:\n nids = usableNids\n else:\n usableNids = set(usableNids)\n #keeping browserNids order here for log file, but not sure if it means anything\n nids = [nid for nid in browserNids if nid in usableNids]\n outcomeCounts[\"wrong type\"] = len(browserNids) - len(nids)\n if nids == []:\n QMessageBox.warning(mw, conf.progName, \"No notes to process\")\n return\n \n if conf.targetField is not None:\n reply = QMessageBox.question(mw, conf.progName,\n \"Really overwrite %s for %d notes?\" % (conf.targetField, len(nids)),\n QMessageBox.Yes, QMessageBox.No)\n if reply != QMessageBox.Yes:\n return\n mw.checkpoint(\"Highlight sentences\") #undo\n \n try:\n wordFinder = core.WordFinder(conf)\n except IOError:\n QMessageBox.warning(mw, conf.progName, \"Can't load inflection dictionary\")\n return\n \n currentTime = datetime.datetime.now().strftime(\"%Y-%m-%d_%H%M%S\")\n logName = conf.progName + \"_%s.log\" % currentTime\n logPath = os.path.normpath(os.path.join(mw.col.media.dir(), \"..\", logName)) #in user profile folder\n mw.progress.start(label=\"Working...\", immediate=True)\n \n try:\n with open(logPath, \"wb\") as logFile:\n for nid in nids:\n note = mw.col.getNote(nid)\n sentence = note[conf.sentenceField]\n word1 = clean(note[conf.wordField1])\n word2 = \"\" if conf.wordField2 is None else clean(note[conf.wordField2])\n wordsDup = [word1, word2, furigana.kanji(word1), furigana.kanji(word2),\n furigana.kana(word1), furigana.kana(word2)]\n words = []\n for word in wordsDup:\n if word not in words:\n words.append(word)\n result = wordFinder.processSentence(words, sentence)\n outcomeCounts[result[\"desc\"]] += 1\n logLine = result[\"desc\"] + \"\\t\" + word1 + \"\\t\" + word2 + \"\\t\" + result[\"new sentence\"] + \"\\n\"\n logFile.write(logLine.encode(\"utf-8\"))\n if conf.targetField is not None:\n #Update note. Not the shortest way to write this, but want to be clear!\n if conf.targetField == conf.sentenceField:\n #Overwriting original field.\n #Update note and add the tag if sentence has changed.\n #Leave tag if it was there already - \n # multiple runs with changed program will add to previous results.\n if result[\"matched\"]:\n note[conf.targetField] = result[\"new sentence\"]\n note.addTag(conf.matchedTag)\n else:\n #Overwriting a different field.\n #Update every note. 
Add the tag if sentence has changed.\n #Remove tag if no match - \n # multiple runs with changed program will overwrite previous results.\n note[conf.targetField] = result[\"new sentence\"]\n if result[\"matched\"]:\n note.addTag(conf.matchedTag)\n else:\n note.delTag(conf.matchedTag)\n note.flush()\n totalsReport = u\"\"\n for outcome in [\"match found\", \"no match\", \"done already\", \"empty sentence\", \"wrong type\"]:\n totalsReport += outcome + \"\\t\" + str(outcomeCounts[outcome]) + \"\\n\"\n logFile.write((u\"\\nTOTALS\\n\" + totalsReport).encode(\"utf-8\"))\n mw.progress.finish()\n mw.reset()\n textToShow = totalsReport.replace(\"\\t\", \": \") + \"\\nwrote log file: \" + logPath\n showText(textToShow, title=conf.progName)\n except IOError:\n mw.progress.finish()\n QMessageBox.warning(mw, conf.progName, \"Error writing log file\")\n","repo_name":"HelenFoster/JSentenceHighlighter","sub_path":"jsentencehighlighter/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5382,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"4"} +{"seq_id":"24483052140","text":"from survey_class import Survey\na = 'what was your first language you learned\\n'\nprint(a)\nlan=Survey(a)\nlan.show_questions()\nprint('\\n')\nwhile True:\n ans=input('Enter your response:- ')\n if ans=='q':\n break\n lan.store_responses(ans)\nprint(\"\\nThank you to everyone who participated in the survey!\")\nlan.show_result()","repo_name":"nirajcx/crash","sub_path":"pythons/testing codes/lang_survay.py","file_name":"lang_survay.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"35605440869","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom PIL import Image\nimport skimage\nimport skimage.color\nimport scipy.signal\nimport scipy.fft\nimport matplotlib.pyplot as plt\n\nimport utils\nimport pcVC\nimport mLM\nimport mW\nimport aL\nimport mRL \n\n\n\"\"\"\nExperiments with a grayscale image and noisy motion blur\n\"\"\"\n\nimg = Image.open('../images/barbara_face.png')\nxs = np.asarray(img)\nxs = skimage.img_as_float(xs)\n\n\n\"\"\"\nDefinition of the filter (noisy motion blur). \nGaussian noise is used, and kernel2 is used for the motion blur. 
\n\"\"\"\n\nnoise_mean = 0\nnoise_var = 0.00001\nimg = Image.open('../kernels/testkernel2.bmp')\nh = np.asarray(img)\nh = skimage.color.rgb2gray(h) if len(h.shape) == 3 else h\nh = skimage.img_as_float(h)\nh = h / np.sum(h[:])\nN = xs.shape[0]\nM = xs.shape[1]\nC = 1 if len(xs.shape)!=3 else xs.shape[2]\n\nHf = utils.psf2otf(h, (N,M)) if C == 1 else utils.psf2otf(h, (N,M,C))\n\nif C == 1:\n f = lambda x: np.real(scipy.fft.ifft2(scipy.fft.fft2(x[:,:])*Hf))\nelse:\n f = lambda x: np.real(scipy.fft.ifftn(scipy.fft.fftn(x[:,:,:])*Hf))\n\nF = lambda x: skimage.util.random_noise(f(x), mode='gaussian', mean = noise_mean, var = noise_var)\n\ny = F(xs)\n\n\n\"\"\"\nShow the input and the blurred input image\n\"\"\"\n\nfig1, axes1 = plt.subplots(1,2, figsize=(18,6))\naxes1[0].imshow(xs, cmap='gray', vmin=0.0, vmax=1.0)\naxes1[0].set_title('Original image')\naxes1[1].imshow(y, cmap='gray', vmin=0.0, vmax=1.0)\naxes1[1].set_title('Observed image')\n\n\n\"\"\"\nInput, phase corrected VC and LM \n\"\"\"\n\npcvc = pcVC.pcVC(F, y)\nmlm = mLM.mLM(F, y)\nmw = mW.mW(F, y)\nal = aL.aL(F, y)\nmrl = mRL.mRL(F, y)\n\nfig2, axes2 = plt.subplots(1,3, figsize=(18,6))\naxes2[0].imshow(pcvc, cmap='gray', vmin=0.0, vmax=1.0)\naxes2[0].set_title('pcVC')\naxes2[1].imshow(mlm, cmap='gray', vmin=0.0, vmax=1.0)\naxes2[1].set_title('mLM')\naxes2[2].imshow(mw, cmap='gray', vmin=0.0, vmax=1.0)\naxes2[2].set_title('mW')\n\nfig3, axes3 = plt.subplots(1,3, figsize=(18,6))\naxes3[0].imshow(xs, cmap='gray', vmin=0.0, vmax=1.0)\naxes3[0].set_title('Original image')\naxes3[1].imshow(al, cmap='gray', vmin=0.0, vmax=1.0)\naxes3[1].set_title('aL')\naxes3[2].imshow(mrl, cmap='gray', vmin=0.0, vmax=1.0)\naxes3[2].set_title('mRL')\nplt.show()\n","repo_name":"fayolle/bbDeblur_py","sub_path":"src/test_bw.py","file_name":"test_bw.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"25685137737","text":"data = open(\"Day_1/Data.txt\").read().split()\n\n\ndef sum_number(number):\n sum_of_digits = 0\n for digit in str(number):\n sum_of_digits += int(digit)\n\n return sum_of_digits\n\n\ndef sum_to_2020():\n num_sums = []\n for i, number in enumerate(data):\n j = sum_number(data[i]) * sum_number(data[i - 1])\n # print(j)\n if j == 220:\n num_sums.append(number)\n if len(num_sums) == 2:\n num_sums = [int(x) for x in num_sums]\n print(num_sums[0] * num_sums[1])\n return num_sums\n\n\nprint(sum_number(1721))\n","repo_name":"jmoro0408/Advent_Code","sub_path":"2020/Day_1/Day_1.py","file_name":"Day_1.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"34083000311","text":"import random, time, os\n\ndef add():\n a = open('my.ideas', 'a+')\n print()\n ask1 = input('Enter the idea: ').strip()\n a.write(f'{ask1}\\n')\n a.close()\n time.sleep(1)\n print()\n print('\\033[32mIdea Added Successfully😎\\033[0m')\n time.sleep(1.5)\n os.system('clear')\n\n\ndef view():\n y = []\n try:\n t = open('my.ideas', 'r')\n while True:\n k = t.readline().strip()\n if k == '':\n break\n y.append(k)\n u = random.randint(0, len(y)-1)\n print()\n print(y[u])\n time.sleep(6)\n t.close()\n os.system('clear')\n menu()\n except FileNotFoundError:\n print()\n print('\\033[31mYou do not have any idea to view! 
Add ideas first!\033[0m')\n time.sleep(3)\n os.system('clear')\n menu()\n\n \ndef menu():\n while True:\n print()\n ask = input('''Enter:\n 1 to Add an idea\n 2 to randomly view an idea\n >> ''')\n if ask == '1':\n add()\n elif ask == '2':\n view()\n\nmenu()","repo_name":"Marach8/day-50-100-days-of-code","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"71495058037","text":"n, r = map(int, input().split())\n\ng = [[] for i in range(n)]\nfor i in range(r):\n a, b, d = map(int, input().split())\n g[a - 1].append((b - 1, d))\n g[b - 1].append((a - 1, d))\n\nINF = 10 ** 9\ndist = [INF] * n\ndist2 = [INF] * n\n\nimport heapq\nq = []\nheapq.heapify(q)\n\ndist[0] = 0\nheapq.heappush(q, (0, 0))\n\nwhile len(q) > 0:\n p = heapq.heappop(q)\n idx = p[1]\n d = p[0]\n\n if dist2[idx] < d:\n continue\n\n for nv in g[idx]:\n nv_idx = nv[0]\n nv_d = nv[1]\n\n d2 = d + nv_d\n\n if dist[nv_idx] > d2:\n dist[nv_idx], d2 = d2, dist[nv_idx]\n heapq.heappush(q, (dist[nv_idx], nv_idx))\n \n if dist2[nv_idx] > d2 and dist[nv_idx] < d2:\n dist2[nv_idx] = d2\n heapq.heappush(q, (dist2[nv_idx], nv_idx))\n\nprint(dist2[n - 1])","repo_name":"knts0/atcoder","sub_path":"learning/arihon/2_shokyu/2-5_graph/roadblocks.py","file_name":"roadblocks.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"4904782754","text":"from selenium import webdriver\nimport requests\nfrom PIL import Image\nimport cv2 as cv\nimport numpy\nimport pytesseract\nfrom io import BytesIO\nimport time\nfrom selenium.webdriver.common.keys import Keys\n\n\nclass Data_process:\n url_frist = 'http://202.192.240.212:82'\n\n def __init__(self, data):\n # called when instantiated; stores the data received from the client\n self.data = data\n # launch the browser; chromedriver must be installed for Chrome to start properly\n self.browser=webdriver.Chrome()\n \n def isElementExist(self,element):\n flag=True\n try:\n self.browser.find_element_by_id(element)\n return flag\n except:\n flag=False\n return flag\n\n def imgToText(self, headers1):\n # fetch the captcha; find_element_by_id looks up the id attribute in the HTML\n jpg=self.browser.find_element_by_id('ccodeimg')\n img_src=jpg.get_attribute(\"src\")\n\n # download and save the captcha image\n response =requests.get(img_src,headers=headers1)\n image = Image.open(BytesIO(response.content))\n image.save('D:\\\\WYU_library.png')\n\n src = cv.imread('D:\\\\WYU_library.png')\n #cv.imshow(\"src\", src)\n img = cv.cvtColor(src, cv.COLOR_BGR2GRAY)\n ret, binary = cv.threshold(img,220,255,cv.THRESH_BINARY)\n #cv.imshow(\"c\",binary)\n kernel = numpy.ones((1, 1), numpy.uint8)\n #img = cv.dilate(binary, kernel, iterations=1)\n img = cv.erode(binary, kernel, iterations=1)\n #cv.imshow('open_out', img)\n text = pytesseract.image_to_string(img)\n #cv.waitKey(1)\n print(\"This OK:%s\"%text)\n return text\n\n\n def menu(self,dataPro):\n print(\"\\nWelcome to the WYU library helper script!\")\n print(time.strftime('%Y-%m-%d',time.localtime(time.time())))\n while True:\n print(\"\\n====== ★ Menu ★ ======\\n\")\n print(\"0. Current loans enter: 0\")\n print(\"1. Borrowing history enter: 1\")\n print(\"2. Catalog search enter: 2\")\n print(\"3. Popular loans enter: 3\")\n print(\"4. Quit script enter: 4\")\n gnxz=input(\"\\n[WYU] Waiting for input: \")\n if gnxz=='0':\n dataPro.dqjy()\n if gnxz=='1':\n list5 = dataPro.lscx(dataPro)\n print(\"list5:\",len(list5))\n if gnxz=='2':\n dataPro.mljs()\n if gnxz=='3':\n dataPro.rmjy()\n if gnxz=='4':\n dataPro.browser.quit()\n break\n def dqjy(self):\n pass\n \n # borrowing history\n def lscx(self,dataPro):\n url1 = Data_process.url_frist + 
'/user/bookborrowedhistory.aspx'\n self.browser.get(url1)\n time.sleep(1)\n\n print(\"\\nSearching, please wait...\\n\")\n\n # extract information from the page\n list2,list3 = [],[]\n while True:\n list1 = []\n # find_elements_by_tag_name finds tags in the HTML\n tds = self.browser.find_elements_by_tag_name(\"td\")\n for td in tds:\n # .text is the element's text content, i.e. the text displayed on the page\n list1.append(td.text)\n list3 = list3 + list1[5:]\n if(dataPro.isElementExist(\"ctl00_cpRight_Pagination2_nexthl2\")):\n self.browser.find_element_by_id(\"ctl00_cpRight_Pagination2_nexthl2\").click()\n else:\n break\n\n # slice out the header list\n list2 = list1[0:4]\n\n # remove extra items; one row of data is empty\n while '' in list3:\n list3.remove('')\n \n # convert to a 2D list for easier output\n m,k=0,0\n list4 = []\n list4.append([])\n #list[k++[m++]]\n for i in list3:\n # start a new sublist every four elements\n if m == 4:\n list4.append([])\n k = k + 1\n m=0\n\n list4[k].append(i)\n list4[k].append('\\n')\n m = m + 1\n '''\n for i in range(len(list4)):\n for j in range(len(list4[i])):\n print(list4[i][j])\n print(\"\\n\")\n '''\n # close the browser\n self.browser.close()\n return list4\n\n def mljs(self):\n pass\n\n def rmjy(self):\n pass\n\n\n # login function\n def login(self,dataPro):\n url = Data_process.url_frist + '/login.aspx'\n # open the URL in the browser\n self.browser.get(url)\n # wait so the page has time to finish loading\n time.sleep(1)\n\n # get the cookie; without it the site cannot identify the session and the captcha will not match\n cookie_bro = self.browser.get_cookies()\n #print(cookie_bro)\n cookie1 = cookie_bro[0]['value']\n #print(\"current cookie: \"+cookie1)\n\n headers1 = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Cache-Control': 'max-age=0',\n 'Connection': 'keep-alive',\n 'Cookie': 'ASP.NET_SessionId=' + cookie1,\n 'Host': '202.192.240.212:82',\n 'Referer': 'http://www.wyu.edu.cn/lib/tb.htm',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent':'Chrome/79.0.3945.88'\n }\n\n\n # enter the login credentials\n self.browser.find_element_by_id(\"ctl00_ContentPlaceHolder1_txtUsername_Lib\").send_keys(self.data[:10])\n self.browser.find_element_by_id(\"ctl00_ContentPlaceHolder1_txtUsername_Lib\").send_keys(Keys.TAB)\n self.browser.find_element_by_id(\"ctl00_ContentPlaceHolder1_txtPas_Lib\").send_keys(self.data[10:])\n\n code = dataPro.imgToText(headers1)\n # the captcha can be recognized automatically\n #code = input(\"[*]Enter the captcha: \")\n self.browser.find_element_by_id(\"ctl00_ContentPlaceHolder1_txtCode\").send_keys(code)\n self.browser.find_element_by_id(\"ctl00_ContentPlaceHolder1_btnLogin_Lib\").click()\n\n while(dataPro.isElementExist(\"ctl00_ContentPlaceHolder1_lblErr_Lib\")):\n print('Captcha recognition failed, retrying')\n self.browser.get(url)\n time.sleep(1)\n self.browser.find_element_by_id(\"ctl00_ContentPlaceHolder1_txtUsername_Lib\").send_keys(self.data[:10])\n self.browser.find_element_by_id(\"ctl00_ContentPlaceHolder1_txtUsername_Lib\").send_keys(Keys.TAB)\n self.browser.find_element_by_id(\"ctl00_ContentPlaceHolder1_txtPas_Lib\").send_keys(self.data[10:])\n\n code = dataPro.imgToText(headers1)\n self.browser.find_element_by_id(\"ctl00_ContentPlaceHolder1_txtCode\").send_keys(code)\n self.browser.find_element_by_id(\"ctl00_ContentPlaceHolder1_btnLogin_Lib\").click()\n\n print(\"\\n====== ★ Login successful ★ ======\\n\")\n'''\ndata = '3118001162cwh13671461740'\ndataPro = Data_process(data)\ndataPro.login(dataPro)\n'''","repo_name":"7pluto/WYU_library","sub_path":"WYU_library_1.0/python服务端/data_process.py","file_name":"data_process.py","file_ext":"py","file_size_in_byte":7095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} 
+{"seq_id":"31635302061","text":"#!/usr/bin/env python\n\nimport time\n\ndef get(n):\n x = 9\n y = 1\n n -= 1\n while n > x:\n n -= x*y\n x = x*10\n y += 1\n\n z = n//y\n zz = 10**(y-1) + z\n return int(str(zz)[ n % y ])\n\nstart = time.clock()\n\nans2 = 1\n\nfor i in range(0, 7):\n ans2 *= get(10**i)\n\nprint(\"{}\".format(ans2))\n\nend = time.clock()\nprint(end-start)\n\n","repo_name":"KyleRoarty/project_euler","sub_path":"python/p040.py","file_name":"p040.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"18136069771","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport os\nimport numpy as np\nfrom itertools import count\nimport sys, getopt\nfrom models.discriminator import critic\nfrom models.richzhang import richzhang as generator\nfrom models.unet import unet\nfrom models.color_unet import color_unet\nfrom models.middle_unet import middle_unet\nfrom settings import s\nimport time\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nfrom torch.utils.data import dataloader\nimport json\nfrom functions import load_trainset\nfrom functions import ab2bins\nfrom functions import softCossEntropyLoss\nfrom skimage import color\nfrom scipy.ndimage.interpolation import zoom\n\ndef main(argv):\n # setting argument defaults\n mbsize = s.batch_size\n report_freq=s.report_freq\n weight_path=s.weights_path\n weights_name=s.weights_name\n lr=s.learning_rate\n save_freq = s.save_freq\n mode=3\n image_loss_weight=s.image_loss_weight\n epochs = s.epochs\n beta1,beta2=s.betas\n infinite_loop=s.infinite_loop\n data_path = s.data_path\n drop_rate = 0\n lab = True\n weighted_loss=True\n weight_lambda=.25\n load_list=s.load_list\n help='train_classification.py -b <batch size> -e <epochs> -r <report freq> -w <weight path> \\\n -n <weight name> -s <save freq> -l <learning rate> -p <data path> -d <drop rate> -m <mode> --beta1 <beta1> \\\n --beta2 <beta2> --lab --weighted \\\n --lambda <weight lambda>'\n try:\n opts, args = getopt.getopt(argv,\"he:b:r:w:l:s:n:p:d:i:m:\",\n ['epochs=',\"mbsize=\",\"report-freq=\",'weight-path=', 'lr=','save-freq=','weight-name=','data_path=','drop_rate=',\n 'beta1=','beta2=','lab','image-loss-weight=','weighted','load-list','mode=','lambda='])\n except getopt.GetoptError:\n print(help)\n sys.exit(2)\n print(\"opts\" ,opts)\n for opt, arg in opts:\n if opt == '-h':\n print(help)\n sys.exit()\n elif opt in (\"-b\", \"--mbsize\"):\n mbsize = int(arg) \n elif opt in (\"-e\", \"--epochs\"):\n epochs = int(arg)\n infinite_loop=False\n elif opt in ('-r','--report-freq'):\n report_freq = int(arg)\n elif opt in (\"-w\", \"--weight-path\"):\n weight_path = arg\n elif opt in (\"-n\", \"--weight-name\"):\n weights_name = arg \n elif opt in (\"-s\", \"--save-freq\"):\n save_freq=int(arg)\n elif opt in (\"-l\", \"--lr\"):\n lr = float(arg)\n elif opt in (\"-p\", \"--data_path\"):\n data_path = str(arg)\n elif opt in (\"-d\", \"--drop_rate\"):\n drop_rate = float(arg)\n elif opt=='-m':\n if arg in ('richzhang','0','ende'):\n mode = 0\n elif arg in ('u','1','unet'):\n mode = 1\n elif arg in ('color','2','cu'):\n mode = 2\n elif arg in ('mu','3','middle'):\n mode = 3\n elif opt=='--beta1':\n beta1 = float(arg)\n elif opt=='--beta2':\n beta2 = float(arg)\n elif opt=='--lab':\n lab=True\n elif opt =='--weighted':\n weighted_loss= not weighted_loss\n elif opt =='--load-list':\n load_list=not load_list\n elif opt =='--lambda':\n weight_lambda = float(arg)\n device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n dataset=None\n in_size = 256\n if 'cifar' 
in data_path:\n in_size = 32\n dataset = 0\n elif 'places' in data_path:\n in_size = 224\n dataset = 1\n elif 'stl' in data_path:\n in_size = 96\n dataset = 2\n in_shape=(3,in_size,in_size)\n\n #out_shape=(s.classes,32,32)\n betas=(beta1,beta2)\n weight_path_ending=os.path.join(weight_path,weights_name+'.pth')\n\n loss_path_ending = os.path.join(weight_path, weights_name + \"_\" + s.loss_name)\n\n trainset = load_trainset(data_path,lab=lab,normalize=False,load_list=load_list)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=mbsize,\n shuffle=True, num_workers=2 if dataset in (0,1) else 0)\n \n print(\"NETWORK PATH:\", weight_path_ending)\n #define output channels of the model\n classes = 150\n #define model\n if mode == 0:\n classifier = generator(drop_rate,classes)\n elif mode == 1:\n classifier = unet(True,drop_rate,classes)\n elif mode == 2:\n classifier = color_unet(True,drop_rate,classes)\n elif mode == 3:\n classifier = middle_unet(True,drop_rate,classes)\n #load weights\n try:\n classifier.load_state_dict(torch.load(weight_path_ending))\n print(\"Loaded network weights from\", weight_path)\n except FileNotFoundError:\n print(\"Initialize new weights for the generator.\")\n #sys.exit(2)\n \n classifier.to(device)\n\n #save the hyperparameters to a JSON-file for better oranization\n model_description_path_ending = os.path.join(weight_path, s.model_description_name)\n # initialize model dict\n try:\n with open(model_description_path_ending, \"r\") as file:\n model_dict = json.load(file)\n except FileNotFoundError:\n model_dict = {}\n\n\n prev_epochs=0\n # save settings in dict if new weights are beeing initialized\n if not weights_name in model_dict.keys():\n model_dict[weights_name] = {\n \"loss_name\": loss_path_ending,\n \"epochs\": 0,\n \"batch_size\": mbsize,\n \"lr\": lr,\n \"lab\":lab,\n \"betas\": betas,\n \"image_loss_weight\": image_loss_weight,\n \"weighted_loss\":weighted_loss,\n \"model\":'classification '+['richzhang','U-Net','color U-Net','middle U-Net'][mode]\n }\n else:\n #load specified parameters from model_dict\n params=model_dict[weights_name]\n #mbsize=params['batch_size']\n betas=params['betas']\n #lr=params['lr']\n lab=params['lab']\n image_loss_weight=params['image_loss_weight']\n weighted_loss=params['weighted_loss']\n loss_path_ending=params['loss_name']\n #memorize how many epochs already were trained if we continue training\n prev_epochs=params['epochs']+1\n\n #optimizer\n optimizer=optim.Adam(classifier.parameters(),lr=lr,betas=betas)\n class_weight_path='resources/class-weights.npy'\n if weighted_loss:\n weights=np.load(class_weight_path)\n if dataset==0:\n class_weight_path='resources/cifar-lab-class-weights.pt'\n weights=torch.load(class_weight_path).numpy()\n elif dataset==2:\n if weight_lambda:\n class_weight_path = 'resources/probdist_lab.pt'\n prob_dict = torch.load(class_weight_path)\n prob = np.array(list(prob_dict.values()))\n weights = 1/((1 - weight_lambda)*prob/prob.sum() + weight_lambda/classes)\n else:\n class_weight_path = 'resources/class-weights-lab150-stl.pt'\n weights = torch.load(class_weight_path)\n \n print('Class-weights loaded from ' + class_weight_path) \n criterion = softCossEntropyLoss(weights=weights,device=device) if weighted_loss else softCossEntropyLoss(weights=None,device=device) \n loss_hist=[] \n soft_onehot = torch.load('resources/smooth_onehot150.pt',map_location=device)\n \n classifier.train() \n # run over epochs\n for e in (range(prev_epochs, prev_epochs + epochs) if not infinite_loop else 
count(prev_epochs)):\n g_running=0\n #load batches \n for i,batch in enumerate(trainloader):\n \n if dataset == 0: #cifar 10\n (image,_) = batch\n elif dataset in (1,2): #places\n image = batch\n \n #batch_size=image.shape[0]\n if dataset == 0: #cifar/stl 10\n image=np.transpose(image,(0,2,3,1))\n image=np.transpose(color.rgb2lab(image),(0,3,1,2))\n image=torch.from_numpy((image-np.array([50,0,0])[None,:,None,None])).float()\n \n X=image[:,:1,:,:].to(device) #set X to the Lightness of the image\n image=image[:,1:,:,:].to(device) #image is a and b channel\n \n #----------------------------------------------------------------------------------------\n ################################### Model optimization ##################################\n #----------------------------------------------------------------------------------------\n #clear gradients\n optimizer.zero_grad()\n #softmax activated distribution\n model_out=classifier(X).double()\n #create bin coded verion of ab ground truth\n binab=ab2bins(image.transpose(1,3).transpose(1,2))\n if mode==0: \n binab=F.interpolate(binab.float(),scale_factor=(.25,.25)).long() \n binab=torch.squeeze(binab,1) \n binab=soft_onehot[:,binab].transpose(0,1).double()\n #calculate loss \n loss=criterion(model_out,binab).mean(0)\n \n loss.backward()\n optimizer.step()\n\n g_running+=loss.item()\n loss_hist.append([e,loss.item()])\n\n #report running loss\n if (i+len(trainloader)*e)%report_freq==report_freq-1:\n print('Epoch %i, batch %i: \\tloss=%.2e'%(e+1,i+1,g_running/report_freq))\n g_running=0\n\n\n if s.save_weights and (i+len(trainloader)*e)%save_freq==save_freq-1:\n #save parameters\n try:\n torch.save(classifier.state_dict(),weight_path_ending)\n #torch.save(crit.state_dict(),crit_path)\n except FileNotFoundError:\n os.makedirs(weight_path)\n torch.save(classifier.state_dict(),weight_path_ending)\n #torch.save(crit.state_dict(),crit_path)\n print(\"Parameters saved\")\n\n if s.save_loss:\n #save loss history to file\n try:\n f=open(loss_path_ending,'a')\n np.savetxt(f,loss_hist,'%e')\n f.close()\n except FileNotFoundError:\n os.makedirs(s.loss_path)\n np.savetxt(loss_path_ending,loss_hist,'%e')\n loss_hist=[]\n\n #update epoch count in dict after each epoch\n model_dict[weights_name][\"epochs\"] = e \n #save it to file\n try:\n with open(model_description_path_ending, \"w\") as file:\n json.dump(model_dict, file, sort_keys=True, indent=4)\n except:\n print('Could not save to model dictionary (JSON-file)') \n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n torch.nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find(\"BatchNorm2d\") != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n\nif __name__ == '__main__':\n main(sys.argv[1:])","repo_name":"lukas-blecher/Colorization","sub_path":"train_classification.py","file_name":"train_classification.py","file_ext":"py","file_size_in_byte":11499,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"4"} +{"seq_id":"29709692394","text":"from targetpipe.calib.camera.filter_wheel import FWCalibrator\nfrom targetpipe.io.camera import Config\nConfig('checm')\n\nfrom tqdm import tqdm, trange\nfrom traitlets import Dict, List\nimport numpy as np\nimport pandas as pd\nimport matplotlib.lines as mlines\nfrom matplotlib.ticker import MultipleLocator, FormatStrFormatter, \\\n ScalarFormatter, FuncFormatter\nimport seaborn as sns\nfrom os.path import realpath, join, 
dirname\n\nfrom ctapipe.calib.camera.dl0 import CameraDL0Reducer\nfrom ctapipe.calib.camera.dl1 import CameraDL1Calibrator\nfrom ctapipe.core import Tool\nfrom ctapipe.image.charge_extractors import AverageWfPeakIntegrator\nfrom ctapipe.image.waveform_cleaning import CHECMWaveformCleanerAverage\nfrom targetpipe.io.eventfilereader import TargetioFileReader\nfrom targetpipe.calib.camera.r1 import TargetioR1Calibrator\nfrom targetpipe.fitting.chec import CHECMSPEFitter\nfrom targetpipe.io.pixels import Dead\nfrom targetpipe.calib.camera.adc2pe import TargetioADC2PECalibrator\nfrom targetpipe.plots.official import ChecmPaperPlotter\n\nfrom IPython import embed\n\n\nclass Scatter(ChecmPaperPlotter):\n name = 'Scatter'\n\n def __init__(self, config, tool, **kwargs):\n \"\"\"\n Parameters\n ----------\n config : traitlets.loader.Config\n Configuration specified by config file or cmdline arguments.\n Used to set traitlet values.\n Set to None if no configuration to pass.\n tool : ctapipe.core.Tool\n Tool executable that is calling this component.\n Passes the correct logger to the component.\n Set to None if no Tool to pass.\n kwargs\n \"\"\"\n super().__init__(config=config, tool=tool, **kwargs)\n\n # self.fig = plt.figure(figsize=(12, 8))\n # self.ax = self.fig.add_subplot(1, 1, 1)\n\n def add(self, x, y, y_err, label):\n c = self.ax._get_lines.get_next_color()\n # no_err = y_err == 0\n # err = ~no_err\n # self.ax.errorbar(x[no_err], y[no_err], fmt='o', mew=0.5, color=c, alpha=0.8, markersize=3, capsize=3)\n (_, caps, _) = self.ax.errorbar(x, y, yerr=y_err, fmt='o', mew=0.5, color=c, alpha=0.8, markersize=3, capsize=3, label=label)\n\n for cap in caps:\n cap.set_markeredgewidth(1)\n\n def create(self, x, y, y_err, label, x_label=\"\", y_label=\"\", title=\"\"):\n self.add(x, y, y_err, label)\n\n # self.ax.set_xscale('log')\n self.ax.set_yscale('log')\n # self.ax.set_xticks(x)\n self.ax.get_xaxis().set_major_formatter(ScalarFormatter())\n self.ax.get_yaxis().set_major_formatter(FuncFormatter(lambda y, _: '{:g}'.format(y)))\n # self.ax.xaxis.set_tick_params(\n # which='minor', # both major and minor ticks are affected\n # bottom='off', # ticks along the bottom edge are off\n # top='off', # ticks along the top edge are off\n # labelbottom='off') # labels along the bottom edge are off\n # self.ax.xaxis.set_tick_params(which='major', labelsize=6.5)\n\n self.ax.set_xlabel(x_label)\n self.ax.set_ylabel(y_label)\n self.fig.suptitle(title)\n # self.ax.xaxis.set_major_locator(AutoMinorLocator(5))\n # self.ax.yaxis.set_minor_locator(AutoMinorLocator(5))\n\n # axes[1].xaxis.set_minor_locator(AutoMinorLocator(5))\n # axes[2].yaxis.set_minor_locator(AutoMinorLocator(5))\n\n def save(self, output_path=None):\n # self.ax.legend(loc=2)\n super().save(output_path)\n\n\nclass FWInvestigator(Tool):\n name = \"FWInvestigator\"\n description = \"Investigate the FW\"\n\n aliases = Dict(dict())\n classes = List([])\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n self.fw_calibrator = None\n\n directory = \"/Users/Jason/Downloads/quick_analysis_results\"\n fw_np_path_y1 = join(directory, \"quick_analysis_area_withoutpreamp.npy\")\n fw_np_path_y2 = join(directory, \"quick_analysis_area_withpreamp-withfilterOD1.npy\")\n fw_np_path_y3 = join(directory, \"quick_analysis_area_withpreamp.npy\")\n fw_np_path_yerr1 = join(directory, \"quick_analysis_areaerr_withoutpreamp.npy\")\n fw_np_path_yerr2 = join(directory, \"quick_analysis_areaerr_withpreamp-withfilterOD1.npy\")\n fw_np_path_yerr3 = join(directory, 
\"quick_analysis_areaerr_withpreamp.npy\")\n fw_np_path_x1 = join(directory, \"quick_analysis_fwpos_withoutpreamp.npy\")\n fw_np_path_x2 = join(directory, \"quick_analysis_fwpos_withpreamp-withfilterOD1.npy\")\n fw_np_path_x3 = join(directory, \"quick_analysis_fwpos_withpreamp.npy\")\n\n self.fw_np_y1 = np.load(fw_np_path_y1)\n self.fw_np_y2 = np.load(fw_np_path_y2)\n self.fw_np_y3 = np.load(fw_np_path_y3)\n self.fw_np_yerr1 = np.load(fw_np_path_yerr1)\n self.fw_np_yerr2 = np.load(fw_np_path_yerr2)\n self.fw_np_yerr3 = np.load(fw_np_path_yerr3)\n self.fw_np_x1 = np.load(fw_np_path_x1)\n self.fw_np_x2 = np.load(fw_np_path_x2)\n self.fw_np_x3 = np.load(fw_np_path_x3)\n\n self.p_attenuation = None\n\n def setup(self):\n self.log_format = \"%(levelname)s: %(message)s [%(name)s.%(funcName)s]\"\n kwargs = dict(config=self.config, tool=self)\n\n self.fw_calibrator = FWCalibrator(**kwargs)\n\n script = \"filter_wheel\"\n self.p_attenuation = Scatter(**kwargs, script=script, figure_name=\"attenuation\")\n\n def start(self):\n\n con = np.concatenate\n df = pd.DataFrame(dict(\n position=con([self.fw_np_x1, self.fw_np_x2, self.fw_np_x3]),\n transmission=con([self.fw_np_y1, self.fw_np_y2, self.fw_np_y3]),\n error=con([self.fw_np_yerr1, self.fw_np_yerr2, self.fw_np_yerr3]),\n ))\n df = df.groupby('position').apply(np.mean)\n\n self.fw_calibrator.df = df\n self.fw_calibrator.save(self.fw_calibrator.fw_path)\n\n x = df['position']\n y = df['transmission']\n y_err = df['error']\n self.p_attenuation.create(x, y, y_err, '', \"Position\", \"Transmission\", \"Filter Wheel Attenuation\")\n\n def finish(self):\n # Save figures\n self.p_attenuation.save()\n\nif __name__ == '__main__':\n exe = FWInvestigator()\n exe.run()\n","repo_name":"watsonjj/targetpipe","sub_path":"scripts/needs_update/filter_wheel.py","file_name":"filter_wheel.py","file_ext":"py","file_size_in_byte":6207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"36258965609","text":"def draw_star(n):\n for i in range(n, 0, -1):\n for j in range(1, i+1):\n if j%5==0 :\n print(\"#\", end=' ')\n else:\n print(\"*\",end=' ')\n print(\"\\r\")\nT=int(input())\nfor k in range(T): # loop over the T test cases; iterating (0, T) would always run exactly twice\n n = int(input())\n draw_star(n)\n ","repo_name":"arya24x7/E_YANTRA","sub_path":"STAR_PY.py","file_name":"STAR_PY.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"70371530999","text":"\"\"\"\nThe Fibonacci series is a series of numbers where a number is the addition of the last two numbers,\nstarting with 0, and 1.\nThe Fibonacci series: 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, .....\n\nWritten as a rule, the expression is:\nXn = Xn-1 + Xn-2\n\"\"\"\n\nprint(\"Program to print n terms of fibonacci series.\")\nn = int(input(\"Enter the value of n: \"))\na = -1\nb = 1\n\nfor i in range(1,n+1):\n c = a+b\n print(c,end=\" \")\n a = b\n b = c\n\nprint(\"\\nThese are first \"+str(n)+\" terms of fibonacci series.\")","repo_name":"BANSAL-NISHU/JOCP","sub_path":"Fibonacci_series.py","file_name":"Fibonacci_series.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"22816325365","text":"class SoundsData:\n def __init__(self, data):\n self.updated = data.get(\"updated\", None)\n self.matching = data.get(\"matching\", None)\n self.sounds = [Sound(sound) for sound in data.get(\"sounds\", [])]\n\n\nclass Sound:\n def 
__init__(self, data):\n self.amount = data.get(\"amount\", None)\n self.description = data.get(\"description\", None)\n self.verified = data.get(\"verified\", None)\n self.newsound = data.get(\"newsound\", None)\n self.matched = data.get(\"matched\", None)","repo_name":"palmtree5/pyboko","sub_path":"pyboko/models/sounds.py","file_name":"sounds.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"35545534418","text":"#!/usr/bin/env python\nimport argparse\nimport numpy as np\nimport pcl\n\nparser = argparse.ArgumentParser(description='Input Point Cloud Filename.')\nparser.add_argument('-f', '--filename', type=str)\nparser.add_argument('-o', '--output', type=str)\nparser.add_argument('--x_min', type=float)\nparser.add_argument('--x_max', type=float)\nparser.add_argument('--y_min', type=float)\nparser.add_argument('--y_max', type=float)\nparser.add_argument('--z_min', type=float)\nparser.add_argument('--z_max', type=float)\n\nargs = parser.parse_args()\n\ncloud = pcl.load(args.filename)\n\n# pcl::CropBox clipper;\n# clipper.setInputCloud(cloud);\nclipper = cloud.make_cropbox()\n\n# pcl::PCDWriter writer;\n# pcl::PointCloud::Ptr outcloud;\noutcloud = pcl.PointCloud()\n\n# clipper.setTranslation(Eigen::Vector3f(pose->tx, pose->ty, pose->tz));\n# clipper.setRotation(Eigen::Vector3f(pose->rx, pose->ry, pose->rz));\n# clipper.setMin(-Eigen::Vector4f(tracklet->l/2, tracklet->w/2, 0, 0));\n# clipper.setMax(Eigen::Vector4f(tracklet->l/2, tracklet->w/2, tracklet->h, 0));\n# clipper.filter(*outcloud);\ntx = 0\nty = 0\ntz = 0\nclipper.set_Translation(tx, ty, tz)\n\nrx = 0\nry = 0\nrz = 0\nclipper.set_Rotation(rx, ry, rz)\n\nminx = args.x_min\nminy = args.y_min\nminz = args.z_min\nmins = 0\n\nmaxx = args.x_max\nmaxy = args.y_max\nmaxz = args.z_max\nmaxs = 0\n\nclipper.set_MinMax(minx, miny, minz, mins, maxx, maxy, maxz, maxs)\noutcloud = clipper.filter()\n\npcl.save(outcloud, args.output)","repo_name":"willshw/mfvs","sub_path":"script/crop_pointcloud.py","file_name":"crop_pointcloud.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"4"} +{"seq_id":"33246518004","text":"# @author\n# Aakash Verma\n\n# www.aboutaakash.in\n# www.innoskrit.in\n# Instagram: https://www.instagram.com/aakashverma1102/\n# LinkedIn: https://www.linkedin.com/in/aakashverma1124/\n\n# Problem Link: https://www.hackerrank.com/challenges/two-pluses/problem?h_r=internal-search\n# Only functions are written in this code\n\n\ndef twoPluses(grid):\n \n temp = list()\n temp.append(['O'] * (m + 2))\n for i in range(n):\n temp.append(['O'] + list(grid[i]) + ['O'])\n temp.append(['O'] * (m + 2))\n \n # check temp grid\n # for i in range(n + 2):\n # for j in range(m + 2):\n # print(temp[i][j], end = \" \")\n # print()\n \n grid = temp\n answer = 0\n \n for i in range(1, n + 1):\n for j in range(1, m + 1):\n \n r = 0\n while grid[i + r][j] == 'G' and grid[i - r][j] == 'G' and grid[i][j + r] == 'G' and grid[i][j - r] == 'G':\n grid[i + r][j] = grid[i - r][j] = grid[i][j + r] = grid[i][j - r] = 'V'\n \n for I in range(1, n + 1):\n for J in range(1, m + 1):\n R = 0\n while grid[I + R][J] == 'G' and grid[I - R][J] == 'G' and grid[I][J + R] == 'G' and grid[I][J - R] == 'G':\n answer = max(answer, (4*r + 1) * (4*R + 1) )\n R += 1\n r += 1\n \n r = 0\n while grid[i + r][j] == 'V' and grid[i - r][j] == 'V' and grid[i][j + r] == 'V' and grid[i][j - r] == 'V':\n grid[i 
+ r][j] = grid[i - r][j] = grid[i][j + r] = grid[i][j - r] = 'G'\n r += 1\n return answer","repo_name":"aakashverma1124/Data-Structures-and-Algorithms-for-Interviews","sub_path":"Python/Graph/Emas_Supercomputer.py","file_name":"Emas_Supercomputer.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":107,"dataset":"github-code","pt":"4"} +{"seq_id":"21139422336","text":"# Alexa has two stacks of non-negative integers, stack a and stack b where index 0 denotes the top of the stack. Alexa challenges Nick to play the following game:\n# In each move, Nick can remove one integer from the top of either stack a or stack b.\n# Nick keeps a running sum of the integers he removes from the two stacks.\n# Nick is disqualified from the game if, at any point, his running sum becomes greater than some integer x given at the beginning of the game.\n# Nick's final score is the total number of integers he has removed from the two stacks.\n# Given a, b, and x, find the maximum score Nick can achieve.\n# Example\n# a = [1, 2, 3, 4, 5]\n# b = [6, 7, 8, 9]\n# x = 12\n#\n# Answer:\n# The maximum number of values Nick can remove is 4. There are two sets of choices with this result.\n# Solution 1: Add 1, 2, 3, 4 from a, total sum is 10.\n# Solution 2: Add 1, 2, 3 from a and 6 from b, total sum is 12.\n\nimport sys\n\n\ndef nick_game_moving(a, b, x):\n i, j, sum = 0, 0, 0\n n = len(a)\n m = len(b)\n\n while i < n and x - a[i] >= 0:\n x -= a[i]\n i += 1\n moving_step = i\n\n while j < m and i >= 0:\n x -= b[j]\n j += 1\n while x < 0 and i > 0:\n i -= 1\n x += a[i]\n if x >= 0 and i + j > moving_step:\n moving_step = i + j\n print(\"The maximum number of values Nick can remove is \", moving_step)\n\n\na = [1, 2, 3, 4, 5]\nb = [6, 7, 8, 9]\n\nc = [9, 1, 1, 1]\nd = [6, 8, 1]\n\nx = 12\n\nnick_game_moving(a, b, x)\nnick_game_moving(c, d, x)\n","repo_name":"nguyentuc/cs_algorithms","sub_path":"two_stacks_problem.py","file_name":"two_stacks_problem.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"70067220596","text":"# -*- coding: utf-8 -*-\n\n# import Python Libs\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nimport logging\nimport os.path\nimport sys\n\nimport pkg_resources\n\n# Import Salt Libs\nimport salt.config\nimport salt.loader\nimport salt.modules.boto_route53 as boto_route53\nimport salt.utils.versions\nfrom salt.ext import six\n\n# Import Salt Testing Libs\nfrom tests.support.mixins import LoaderModuleMockMixin\nfrom tests.support.runtests import RUNTIME_VARS\nfrom tests.support.unit import TestCase, skipIf\n\n# import Python Third Party Libs\n# pylint: disable=import-error\ntry:\n import boto\n\n boto.ENDPOINTS_PATH = os.path.join(\n RUNTIME_VARS.TESTS_DIR, \"unit/files/endpoints.json\"\n )\n from moto import mock_route53_deprecated\n\n HAS_MOTO = True\nexcept ImportError:\n HAS_MOTO = False\n\n def mock_route53_deprecated(self):\n \"\"\"\n if the mock_route53_deprecated function is not available due to import failure\n this replaces the decorated function with stub_function.\n Allows boto_route53 unit tests to use the @mock_route53_deprecated decorator\n without a \"NameError: name 'mock_route53_deprecated' is not defined\" error.\n \"\"\"\n\n def stub_function(self):\n pass\n\n return stub_function\n\n\n# pylint: enable=import-error\n\nlog = logging.getLogger(__name__)\n\nrequired_moto = \"0.3.7\"\nrequired_moto_py3 = 
\"1.0.1\"\n\n\ndef _has_required_moto():\n \"\"\"\n Returns True or False depending on if ``moto`` is installed and at the correct version,\n depending on what version of Python is running these tests.\n \"\"\"\n if not HAS_MOTO:\n return False\n else:\n moto_version = salt.utils.versions.LooseVersion(\n pkg_resources.get_distribution(\"moto\").version\n )\n if moto_version < salt.utils.versions.LooseVersion(required_moto):\n return False\n elif six.PY3 and moto_version < salt.utils.versions.LooseVersion(\n required_moto_py3\n ):\n return False\n\n return True\n\n\n@skipIf(HAS_MOTO is False, \"The moto module must be installed.\")\n@skipIf(\n _has_required_moto() is False,\n \"The moto module must be >= to {0} for \"\n \"PY2 or {1} for PY3.\".format(required_moto, required_moto_py3),\n)\n@skipIf(\n sys.version_info > (3, 6),\n \"Disabled for 3.7+ pending https://github.com/spulec/moto/issues/1706.\",\n)\nclass BotoRoute53TestCase(TestCase, LoaderModuleMockMixin):\n \"\"\"\n TestCase for salt.modules.boto_route53 module\n \"\"\"\n\n def setup_loader_modules(self):\n self.opts = salt.config.DEFAULT_MINION_OPTS.copy()\n self.opts[\"route53.keyid\"] = \"GKTADJGHEIQSXMKKRBJ08H\"\n self.opts[\"route53.key\"] = \"askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs\"\n utils = salt.loader.utils(self.opts)\n funcs = salt.loader.minion_mods(\n self.opts, utils=utils, whitelist=[\"boto_route53\", \"config\"]\n )\n return {\n boto_route53: {\n \"__opts__\": self.opts,\n \"__utils__\": utils,\n \"__salt__\": funcs,\n },\n }\n\n def setUp(self):\n TestCase.setUp(self)\n # __virtual__ must be caller in order for _get_conn to be injected\n boto_route53.__virtual__()\n boto_route53.__init__(self.opts)\n\n def tearDown(self):\n del self.opts\n\n @mock_route53_deprecated\n def test_create_healthcheck(self):\n \"\"\"\n tests that given a valid instance id and valid ELB that\n register_instances returns True.\n \"\"\"\n expected = {\n \"result\": {\n \"CreateHealthCheckResponse\": {\n \"HealthCheck\": {\n \"HealthCheckConfig\": {\n \"FailureThreshold\": \"3\",\n \"IPAddress\": \"10.0.0.1\",\n \"ResourcePath\": \"/\",\n \"RequestInterval\": \"30\",\n \"Type\": \"HTTPS\",\n \"Port\": \"443\",\n \"FullyQualifiedDomainName\": \"blog.saltstack.furniture\",\n },\n \"HealthCheckVersion\": \"1\",\n },\n },\n },\n }\n healthcheck = boto_route53.create_healthcheck(\n \"10.0.0.1\",\n fqdn=\"blog.saltstack.furniture\",\n hc_type=\"HTTPS\",\n port=443,\n resource_path=\"/\",\n )\n del healthcheck[\"result\"][\"CreateHealthCheckResponse\"][\"HealthCheck\"][\n \"CallerReference\"\n ]\n del healthcheck[\"result\"][\"CreateHealthCheckResponse\"][\"HealthCheck\"][\"Id\"]\n self.assertEqual(healthcheck, expected)\n","repo_name":"Kamatera/salt","sub_path":"tests/unit/modules/test_boto_route53.py","file_name":"test_boto_route53.py","file_ext":"py","file_size_in_byte":4714,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"36119338468","text":"import numpy as np\n\n# Input: expects 3xN matrix of points\n# Returns R,t\n# R = 3x3 rotation matrix\n# t = 3x1 column vector\n\ndef rigid_transform_3D(A, B):\n assert A.shape == B.shape\n\n num_rows, num_cols = A.shape\n if num_rows != 3:\n raise Exception(f\"matrix A is not 3xN, it is {num_rows}x{num_cols}\")\n\n num_rows, num_cols = B.shape\n if num_rows != 3:\n raise Exception(f\"matrix B is not 3xN, it is {num_rows}x{num_cols}\")\n\n # find mean column wise\n centroid_A = np.mean(A, axis=1)\n centroid_B = np.mean(B, axis=1)\n\n # ensure 
centroids are 3x1\n centroid_A = centroid_A.reshape(-1, 1)\n centroid_B = centroid_B.reshape(-1, 1)\n\n # subtract mean\n Am = A - centroid_A\n Bm = B - centroid_B\n\n H = Am @ np.transpose(Bm)\n\n # sanity check\n #if linalg.matrix_rank(H) < 3:\n # raise ValueError(\"rank of H = {}, expecting 3\".format(linalg.matrix_rank(H)))\n\n # find rotation\n U, S, Vt = np.linalg.svd(H)\n R = Vt.T @ U.T\n\n # special reflection case\n if np.linalg.det(R) < 0:\n print(\"det(R) < 0, reflection detected!, correcting for it ...\")\n Vt[2,:] *= -1\n R = Vt.T @ U.T\n\n t = -R @ centroid_A + centroid_B\n\n return R, t\n","repo_name":"ABC-iRobotics/irob-saf","sub_path":"irob_utils/scripts/irob_utils/rigid_transform_3D.py","file_name":"rigid_transform_3D.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"4"} +{"seq_id":"38388485284","text":"from discord.ext import commands\nfrom utils import config as cfg\nimport os\nimport asyncpg\n\nbot = commands.Bot(command_prefix=cfg.config[0]['discord']['prefix'],\n description='Bot for scheduling study sessions of the Comunidade Mentoria IAC',\n reconnect=True)\n\n\nasync def create_database_pool():\n bot.pg_con = await asyncpg.create_pool(user=cfg.config[0]['postgres']['user'], password=cfg.config[0]['postgres']['password'], database=cfg.config[0]['postgres']['database'], host=cfg.config[0]['postgres']['host'], port=5432)\n await bot.pg_con.execute(\"CREATE TABLE IF NOT EXISTS Events (id SERIAL PRIMARY KEY, message_id bigint, calendar_id text, date_time timestamp with time zone, event_name text , event_link text);\")\n await bot.pg_con.execute(\"CREATE TABLE IF NOT EXISTS Notifications (id SERIAL PRIMARY KEY, user_id bigint, message_id bigint, calendar_id text, event_link text );\")\n\n\n\n@bot.event\nasync def on_ready():\n print(\"Username: {0}\\nID: {0.id}\".format(bot.user))\n\n for filename in os.listdir('./cogs'):\n if filename.endswith('.py'):\n bot.load_extension(f'cogs.{filename[:-3]}')\n\n\n@bot.event\nasync def on_command_error(ctx, error):\n if isinstance(error, (commands.CommandNotFound, commands.BadArgument, commands.MissingRequiredArgument)):\n return await ctx.send(error)\n else:\n return\n\nbot.loop.run_until_complete(create_database_pool())\nbot.run(cfg.config[0]['discord'][\"token\"])\n","repo_name":"Rehzende/botCalendar","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"16771623765","text":"class Max: \n def MaxPath(self, root):\n self.res= float('-inf')\n self.dfs(root)\n return self.res\n def dfs(self, root):\n if not root:\n return 0\n left = max(0, self.dfs(root.left))\n right = max(0, self.dfs(root.right))\n self.res = max(self.res, left + root.val + right)\n return root.val + max(left, right)\n
","repo_name":"hbulpf/pydemo","sub_path":"src/algo_cases/第5章/5_5.py","file_name":"5_5.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"4"} +{"seq_id":"2631730129","text":"from typing import List\n\nclass Solution:\n def findDisappearedNumbers(self, nums: List[int]) -> List[int]:\n i = 0\n n = len(nums)\n \n # cyclic sort: move each value v to index v - 1\n while i < n:\n pos = nums[i] - 1\n if nums[i] != nums[pos]:\n nums[i],nums[pos] = nums[pos],nums[i]\n else:\n i += 1\n result = []\n for i,item in enumerate(nums):\n if i + 1 != item:\n result.append(i+1)\n return result\n ","repo_name":"Dawit2119/competitive-programming","sub_path":"0448-find-all-numbers-disappeared-in-an-array/0448-find-all-numbers-disappeared-in-an-array.py","file_name":"0448-find-all-numbers-disappeared-in-an-array.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"69907374516","text":"def loadDataSet():\r\n return [[1,3,4],\r\n [2,3,5],\r\n [1,2,3,5],\r\n [2,5]]\r\n\r\ndef createC1(dataSet):\r\n C1 = []\r\n for transaction in dataSet:\r\n for item in transaction:\r\n if not [item] in C1:\r\n C1.append([item])\r\n C1.sort()\r\n return C1\r\n\r\ndef judge_is_contain(a,b) -> bool:# check whether a is a subset of b\r\n a = set(a)\r\n b = set(b)\r\n if a.issubset(b):\r\n return True\r\n else:\r\n return False\r\n\r\ndef scanD(D, Ck, minSupport): # in Python 3 an iterator is needed here\r\n ssCnt = {}\r\n for tid in D:\r\n for can in Ck:\r\n if judge_is_contain(can,tid):\r\n temp = tuple(can)\r\n if temp not in ssCnt:\r\n ssCnt[temp] = 1\r\n else:\r\n ssCnt[temp] += 1\r\n numItems = float(len(D))\r\n retList = [] # filtered list\r\n supportData = {}\r\n for key in ssCnt:\r\n support = ssCnt[key] / numItems\r\n if support >= minSupport:\r\n retList.append(key)\r\n supportData[key] = support\r\n return retList, supportData\r\n # ToDo: retList here looks like [(1,), (3,), (2,), (5,)]\r\n # ToDo: supportData = {(1,): 0.5, (3,): 0.75, (4,): 0.25, (2,): 0.75, (5,): 0.75} is a dict\r\n\r\ndef aprioriGen(Lk, k): #create Ck\r\n retList = [] # note: convert the lists to sets before passing them in\r\n lenLk = len(Lk)\r\n for i in range(lenLk):\r\n for j in range(i+1, lenLk):\r\n L1 = list(Lk[i])[:k-2]\r\n L2 = list(Lk[j])[:k-2]\r\n L1.sort();L2.sort()\r\n if L1 == L2:\r\n temp = []\r\n temp.extend(Lk[i])\r\n temp.extend(Lk[j])\r\n retList.append(tuple(set(temp)))\r\n return retList\r\n\r\ndef apriori(dataSet, minSupport):\r\n C1 = createC1(dataSet) #[[1], [2], [3], [4], [5]]\r\n D = dataSet #[[1, 3, 4], [2, 3, 5], [1, 2, 3, 5], [2, 5]]\r\n L1, supportData = scanD(D, C1, minSupport)\r\n L = [L1]\r\n k = 2\r\n while(len(L[k-2])>0):\r\n Ck = aprioriGen(L[k-2], k)\r\n Lk, supk = scanD(D, Ck, minSupport)\r\n supportData.update(supk)\r\n L.append(Lk)\r\n k += 1\r\n return L, supportData\r\n# the code above only finds frequent itemsets; it does not find association rules yet. After implementing rule mining, think of a fun example, and have a look at kNN\r\n# TODO: below, build association rules starting from the frequent itemsets\r\n\r\ndef generateRules(L, supportData, minConf=0.7):\r\n bigRuleList = []\r\n for i in range(1, len(L)): # skip itemsets like [[3],[2],[5]]; we want ones like [2,5]\r\n for freqList in L[i]: # freqList is a tuple\r\n H1 = [ [item] for item in freqList] # H1 is a list here\r\n if (i > 1):\r\n rulesFromConseq(freqList, H1, supportData, bigRuleList, minConf) # merge further\r\n else:\r\n calConf(freqList, H1, supportData, bigRuleList, minConf)\r\n return bigRuleList\r\n'''\r\nfreqList = 2,3,4,5\r\nH1 = [[2], [3], [4], [5]]\r\n'''\r\n\r\ndef calConf(freqList, H, supportData, br1, minConf=0.7):\r\n prunedH = []\r\n for conseq in H:\r\n set1 = set(freqList)\r\n set2 = set(conseq)\r\n tuple_delta = tuple(set1 - set2)\r\n conf = supportData[freqList] / supportData[tuple_delta] # this effectively uses a conditional probability to compute P -> H\r\n conf2 = supportData[freqList] / supportData[tuple(conseq)]\r\n if conf >= minConf:\r\n if [tuple_delta,tuple(conseq)] not in br1:\r\n print(\"{} --> {} == {}\".format(tuple_delta, tuple(conseq), conf))\r\n br1.append([tuple_delta,tuple(conseq)])\r\n prunedH.append(tuple(conseq))\r\n if conf2 >= minConf:\r\n if [tuple(conseq),tuple_delta] not in br1:\r\n print(\"{} --> {} == {}\".format(tuple(conseq), tuple_delta, conf))\r\n br1.append([tuple(conseq),tuple_delta])\r\n prunedH.append(tuple_delta)\r\n return 
prunedH\r\n\r\ndef rulesFromConseq(freqList, H, supportData, br1, minConf = 0.7): #[2,3,5]\r\n m = len(H[0])\r\n if len(freqList) > m + 1:\r\n Hmp1 = aprioriGen(H,m + 1)\r\n Hmp1 = calConf(freqList, Hmp1, supportData, br1, minConf)\r\n if len(Hmp1)>1:\r\n rulesFromConseq(freqList, Hmp1, supportData, br1, minConf)\r\n\r\nL, supportData = apriori(loadDataSet(), 0.5)\r\ngenerateRules(L, supportData, 0.5)\r\n","repo_name":"jjn123456/First-experience-of-machine-learning","sub_path":"4.Apriori/Apriori.py","file_name":"Apriori.py","file_ext":"py","file_size_in_byte":4412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"70405495158","text":"#####################################################################################\nimport random\nfrom functions import *\nfrom node import Node\nfrom linkedlist import LinkedList\nfrom player import Player\nfrom opponent import Opponent\nfrom deck import Deck\nfrom card import Card\n\n#####################################################################################\n\n#####################################################################################\n\n# Introduction ASCII & Player information gathering\n\nplayer = player_info_gathering()\nplayer_introduction(player)\n\n# Declaration of Deck\n\ndeck = Deck()\n\n# Opponents Generation\n\nopp_one = Opponent(\"Amy\")\nopp_two = Opponent(\"Jack\")\nopp_three = Opponent(\"Dilara\")\nopp_four = Opponent(\"Lucas\")\nopp_five = Opponent(\"Jeff\")\n\n# Allocation of Money for player and AI\n\nplayer.money = int(input(\"How much money would you like to start with?\"))\nopp_one.money = player.money\nopp_two.money = player.money\nopp_three.money = player.money\nopp_four.money = player.money\nopp_five.money = player.money\nbig_blind = player.money / 50\nsmall_blind = big_blind / 2\n\n# We start off with the player as the dealer.\n# Declaring a circularly linked list to make a 'table' environment.\n\ntable = LinkedList()\ntable.insert(player)\ntable.insert(opp_one)\ntable.insert(opp_two)\ntable.insert(opp_three)\ntable.insert(opp_four)\ntable.insert(opp_five)\n# Adding the circular link.\ncircular = table.head\n\nwhile (circular.next):\n circular = circular.next\ncircular.next = table.head\n\n# Flop, turn and river\n\nflop_t_r = []\n\n\n# Start of the command loop\n\ndeck_count = 0\npot = 0\ncurrent = table.head\ncurrent = current.next\n\nwhile player.money > 0:\n # Small blind and big blind\n current.player.money -= small_blind\n pot += small_blind\n current.next.player.money -= big_blind\n pot += big_blind\n # Dealing out of cards.\n deck.shuffle()\n print(\"Dealing phase:\")\n deal_cards(current, deck_count, deck, table)\n\n # Pre-flop betting\n printh(player)\n pot = preflop_bet(table, pot, player, big_blind)\n # Now time to create flop\n print(\"Pot: \" + str(pot))\n flop_t_r = flop(flop_t_r, deck, deck_count)\n printh(player)\n pot = postflop_bet(table, pot, player, big_blind, flop_t_r) + pot\n print(\"Pot: \" + str(pot))\n flop_t_r = turn(flop_t_r, deck, deck_count)\n printh(player)\n pot = postflop_bet(table, pot, player, big_blind, flop_t_r) + pot\n flop_t_r = river(flop_t_r, deck, deck_count)\n printh(player)\n pot = postflop_bet(table, pot, player, big_blind, flop_t_r) + pot\n\n break\n\n\n","repo_name":"haezera/texas_holdem","sub_path":"tholdem.py","file_name":"tholdem.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} 
+{"seq_id":"626483020","text":"import json\n\nif __name__ == '__main__':\n    with open('countries.json', 'r', encoding='utf-8') as file:\n        countries = json.load(file)\n    religions = {}\n    for country in countries:\n        religions[country['religion']] = religions.get(country['religion'], []) + [country['country']]\n    with open('religion.json', 'w', encoding='utf-8') as file:\n        json.dump(religions, file, indent='    ')","repo_name":"Evgenii141988/Generation_Python_professionals","sub_path":"Generation_Python_professionals/task_4_4_10.py","file_name":"task_4_4_10.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"2990129681","text":"import random\n\n########################################################################################################################\n\n\ndef game_selector_greeting():\n\n    print()\n\n    print('Welcome to the Game Selector!')\n\n    print()\n\n    print('The current available games are \"Rock, Paper Scissors\" (RPS), \"Twenty-One\" (21), \"Tic-Tac-Toe\" (TTT), '\n          'the \"Python Quiz\" (PQ), \"Hang Man\" (HM), \"Go Fish\" (GF), \"Uno\" (1), \"War\" and \"Connect Four\"!')\n\n    print()\n\n\ndef game_selector_hub():\n\n    while True:\n\n        user_game_input = input('Enter the game you would like to play, or if you need help type \"Help\" if you want me to state the different games. Or if you don\\'t want to play anything type \"Quit\" ')\n\n        if (user_game_input.upper() == 'RPS') or (user_game_input.upper() == 'ROCK, PAPER SCISSORS'):\n\n            rock_paper_scissors_game()\n\n        elif (user_game_input.upper() == 'TWENTY-ONE') or (user_game_input.upper() == '21'):\n\n            twenty_one_game()\n\n        elif (user_game_input.upper() == 'TIC-TAC-TOE') or (user_game_input.upper() == 'TTT'):\n\n            tic_tac_toe_game()\n\n        elif (user_game_input.upper() == 'PYTHON QUIZ') or (user_game_input.upper() == 'PQ'):\n\n            python_quiz_game()\n\n        elif (user_game_input.upper() == 'HANG MAN') or (user_game_input.upper() == 'HM'):\n\n            hang_man_game()\n\n        elif user_game_input.upper() == 'HELP':\n\n            print()\n\n            print('The current available games are \"Rock, Paper Scissors\" (RPS), \"Twenty-One\" (21), \"Tic-Tac-Toe\" '\n                  '(TTT), the \"Python Quiz\" (PQ), \"Hang Man\" (HM), \"Go Fish\" (GF), \"Uno\" (1), \"War\" and \"Connect Four\"!')\n\n            print()\n\n        elif (user_game_input.upper() == 'GO FISH') or (user_game_input.upper() == 'GF'):\n\n            go_fish_game()\n\n        elif (user_game_input.upper() == 'UNO') or (user_game_input.upper() == '1'):\n\n            uno_game()\n\n        elif user_game_input.upper() == 'WAR':\n\n            war_game()\n\n        elif user_game_input.upper() == 'CONNECT FOUR':\n\n            connect_four_game()\n\n        elif user_game_input.upper() == 'QUIT':\n\n            print('See you later alligator!')\n\n            exit()\n\n        else:\n\n            print('Please type in a valid input!')\n\n########################################################################################################################\n\n\ndef rock_paper_scissors_game():\n\n    rock_paper_scissors_dictionary = {'Wins': 0, 'Losses': 0, 'Ties': 0, 'Games Played': 0}\n\n    def rock_paper_scissors_game_rules():\n\n        print()\n\n        print('Welcome to Rock, Paper Scissors! Try your best to beat the computer!')\n\n        print()\n\n    def rock_paper_scissors_weapon_picker():\n\n        rock_paper_scissors_weapons = ['ROCK', 'PAPER', 'SCISSORS']\n\n        rock_paper_scissors_computer_weapon = random.choice(rock_paper_scissors_weapons)\n\n        while True:\n\n            rock_paper_scissors_user_weapon = input('Do you want to use Rock, Paper or Scissors? 
')\n\n            if rock_paper_scissors_user_weapon.upper() in rock_paper_scissors_weapons:\n\n                break\n\n            else:\n\n                print('Please pick Rock, Paper or Scissors!')\n\n        return rock_paper_scissors_computer_weapon.upper(), rock_paper_scissors_user_weapon.upper()\n\n    def rock_paper_scissors_outcome_calc(computer_weapon, user_weapon):\n\n        if computer_weapon == user_weapon:\n\n            print('You and the computer both picked the same thing!')\n\n            rock_paper_scissors_dictionary['Ties'] += 1\n\n        elif computer_weapon == 'ROCK' and user_weapon == 'SCISSORS':\n\n            print('The computer crushed your scissors!')\n\n            rock_paper_scissors_dictionary['Losses'] += 1\n\n        elif computer_weapon == 'PAPER' and user_weapon == 'ROCK':\n\n            print('The computer covered your rock!')\n\n            rock_paper_scissors_dictionary['Losses'] += 1\n\n        elif computer_weapon == 'SCISSORS' and user_weapon == 'PAPER':\n\n            print('The computer cut your paper!')\n\n            rock_paper_scissors_dictionary['Losses'] += 1\n\n        elif user_weapon == 'ROCK' and computer_weapon == 'SCISSORS':\n\n            print('You crushed the computer\\'s scissors!')\n\n            rock_paper_scissors_dictionary['Wins'] += 1\n\n        elif user_weapon == 'PAPER' and computer_weapon == 'ROCK':\n\n            print('You covered the computer\\'s rock!')\n\n            rock_paper_scissors_dictionary['Wins'] += 1\n\n        elif user_weapon == 'SCISSORS' and computer_weapon == 'PAPER':\n\n            print('You cut the computer\\'s paper!')\n\n            rock_paper_scissors_dictionary['Wins'] += 1\n\n    def rock_paper_scissors_replay():\n\n        print()\n\n        print('Here\\'s the score:')\n\n        print(rock_paper_scissors_dictionary)\n\n        print()\n\n        while True:\n\n            rock_paper_scissors_replay_input = input('Do you want to play again? ')\n\n            if rock_paper_scissors_replay_input.upper() == 'YES':\n\n                print('Okay, here we go again!')\n\n                print()\n\n                break\n\n            elif rock_paper_scissors_replay_input.upper() == 'NO':\n\n                print('Okay, I\\'ll see you later!')\n\n                print()\n\n                game_selector_hub()\n\n                break\n\n            else:\n\n                print('Please enter either \"Yes\" or \"No\"!')\n\n    rock_paper_scissors_game_rules()\n\n    while True:\n\n        rock_paper_scissors_computer_user_weapons = rock_paper_scissors_weapon_picker()\n\n        rock_paper_scissors_outcome_calc(rock_paper_scissors_computer_user_weapons[0], rock_paper_scissors_computer_user_weapons[1])\n\n        rock_paper_scissors_dictionary['Games Played'] += 1\n\n        rock_paper_scissors_replay()\n\n########################################################################################################################\n\n\ndef twenty_one_game():\n\n    twenty_one_card_dictionary = {'Ace': 0, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, '10': 10, 'Jack': 10, 'Queen': 10, 'King': 10}\n\n    twenty_one_score_dictionary = {'Wins': 0, 'Losses': 0, 'Ties': 0, 'Games Played': 0}\n\n    def twenty_one_rules():\n\n        print()\n\n        print('Welcome to 21! Try and get as close to 21 as possible!')\n\n        print()\n\n        print('If you go over 21 you lose! The same applies if the computer goes over 21, then it loses!')\n\n        print('The deck will be shuffled a number of times of your choosing and will be distributed between you and the computer.')\n\n        print('When prompted, type whether you want a card or not; the player with the highest number wins! 
(Given that you didn\\'t bust!)')\n\n print()\n\n print('Good luck!')\n\n print()\n\n def twenty_one_deck_maker():\n\n twenty_one_entire_card_deck = []\n\n twenty_one_suit_deck = ['Ace', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'Jack', 'Queen', 'King']\n\n twenty_one_deck_counter = 0\n\n while twenty_one_deck_counter < 4:\n\n for card in twenty_one_suit_deck:\n\n twenty_one_entire_card_deck.append(card)\n\n twenty_one_deck_counter += 1\n\n while True:\n\n twenty_one_times_shuffled = input('How many times do you want the deck to be shuffled? ')\n\n if twenty_one_times_shuffled.isdigit():\n\n twenty_one_shuffles = int(twenty_one_times_shuffled)\n\n break\n\n else:\n\n print('Type in an integer!')\n\n print()\n\n print('The deck will be shuffled {} time(s)!'.format(twenty_one_shuffles))\n\n while twenty_one_shuffles > 0:\n\n random.shuffle(twenty_one_entire_card_deck)\n\n twenty_one_shuffles -= 1\n\n return twenty_one_entire_card_deck\n\n def twenty_one_gameplay(card_deck):\n\n twenty_one_user_hand = []\n\n twenty_one_computer_hand_hidden = []\n\n twenty_one_computer_hand_visible = []\n\n twenty_one_first_card = 0\n\n while twenty_one_first_card < 2:\n\n twenty_one_user_hand.append(card_deck[0])\n\n card_deck.remove(card_deck[0])\n\n twenty_one_computer_hand_hidden.append(card_deck[0])\n\n card_deck.remove(card_deck[0])\n\n twenty_one_first_card += 1\n\n twenty_one_computer_hand_visible.append(twenty_one_computer_hand_hidden[0])\n\n twenty_one_computer_hand_visible.append('X')\n\n print()\n\n print('Your Hand:')\n\n print(twenty_one_user_hand)\n\n print()\n\n print('Computer\\'s Hand:')\n\n print(twenty_one_computer_hand_visible)\n\n print()\n\n twenty_one_user_hand_value = 0\n\n for card in twenty_one_user_hand:\n\n if card == 'Ace':\n\n while True:\n\n twenty_one_ace_value = input('Do you want your Ace to have a value of 1 or 11? ')\n\n if twenty_one_ace_value.isdigit():\n\n if (twenty_one_ace_value == '1') or (twenty_one_ace_value == '11'):\n\n twenty_one_user_hand_value = twenty_one_user_hand_value + int(twenty_one_ace_value)\n\n break\n\n else:\n\n print('The value has to be 1 or 11!')\n\n print()\n\n else:\n\n print('Type in either \"1\" or \"11\".')\n\n print()\n\n else:\n\n twenty_one_user_hand_value = twenty_one_user_hand_value + twenty_one_card_dictionary[card]\n\n twenty_one_computer_hand_value = 0\n\n for card in twenty_one_computer_hand_hidden:\n\n if card == 'Ace':\n\n if twenty_one_computer_hand_value <= 10:\n\n twenty_one_computer_hand_value = twenty_one_computer_hand_value + 11\n\n else:\n\n twenty_one_computer_hand_value = twenty_one_computer_hand_value + 1\n\n else:\n\n twenty_one_computer_hand_value = twenty_one_computer_hand_value + twenty_one_card_dictionary[card]\n\n twenty_one_user_stay_counter = 0\n\n twenty_one_computer_stay_counter = 0\n\n while True:\n\n if (twenty_one_user_stay_counter == 1) and (twenty_one_computer_stay_counter == 1):\n\n print('Okay, you and the computer both want to stay!')\n\n print()\n\n break\n\n if twenty_one_user_hand_value == 21:\n\n print('You got 21! Congrats!')\n\n print()\n\n break\n\n elif twenty_one_user_hand_value > 21:\n\n print('You went over 21! You busted!')\n\n print()\n\n break\n\n else:\n\n print('Your hand\\'s current value is {}.'.format(twenty_one_user_hand_value))\n\n print()\n\n while True:\n\n twenty_one_user_input = input('Do you want to draw a card? 
(\"Yes\"/\"No\") ')\n\n if twenty_one_user_input.upper() == 'YES':\n\n print('You drew a card!')\n\n print()\n\n twenty_one_card_draw = card_deck[0]\n\n twenty_one_user_hand.append(twenty_one_card_draw)\n\n card_deck.remove(twenty_one_card_draw)\n\n if twenty_one_card_draw == 'Ace':\n\n while True:\n\n twenty_one_ace_value = input('Do you want your Ace to have a value of 1 or 11? ')\n\n if twenty_one_ace_value.isdigit():\n\n if (twenty_one_ace_value == '1') or (twenty_one_ace_value == '11'):\n\n twenty_one_user_hand_value = twenty_one_user_hand_value + int(twenty_one_ace_value)\n\n break\n\n else:\n\n print('The value has to be 1 or 11!')\n\n print()\n\n else:\n\n print('Type in either \"1\" or \"11\".')\n\n print()\n\n else:\n\n twenty_one_user_hand_value = twenty_one_user_hand_value + twenty_one_card_dictionary[twenty_one_card_draw]\n\n break\n\n elif twenty_one_user_input.upper() == 'NO':\n\n twenty_one_user_stay_counter = 1\n\n print('Okay, you decided to stay.')\n\n print()\n\n break\n\n else:\n\n print('Please enter either \"Yes\" or \"No\".')\n\n print()\n\n if twenty_one_computer_hand_value == 21:\n\n print('The computer got 21!')\n\n print()\n\n break\n\n elif twenty_one_computer_hand_value > 21:\n\n print('The computer busted!')\n\n print()\n\n break\n\n elif 16 <= twenty_one_computer_hand_value <= 20:\n\n twenty_one_computer_stay_counter = 1\n\n print('The computer decided to stay!')\n\n print()\n\n else:\n\n print('The computer drew a card!')\n\n twenty_one_computer_hand_hidden.append(card_deck[0])\n\n twenty_one_computer_hand_value = twenty_one_computer_hand_value + twenty_one_card_dictionary[card_deck[0]]\n\n card_deck.remove(card_deck[0])\n\n twenty_one_computer_hand_visible.append('X')\n\n print()\n\n print('Your Hand:')\n\n print(twenty_one_user_hand)\n\n print()\n\n print('Computer\\'s Hand:')\n\n print(twenty_one_computer_hand_visible)\n\n print()\n\n if (twenty_one_user_stay_counter == 1) and (twenty_one_computer_stay_counter == 1):\n\n print('Okay, you and the computer both want to stay!')\n\n print()\n\n break\n\n print('Ending Hands:')\n\n print()\n\n print('Your Hand:')\n\n print(twenty_one_user_hand)\n\n print('Hand value: {}'.format(twenty_one_user_hand_value))\n\n print()\n\n print('Computer\\'s Hand:')\n\n print(twenty_one_computer_hand_hidden)\n\n print('Hand value: {}'.format(twenty_one_computer_hand_value))\n\n print()\n\n return twenty_one_user_hand_value, twenty_one_computer_hand_value\n\n def twenty_one_replay(user_value, computer_value):\n\n if (user_value > 21) and (computer_value > 21):\n\n print('You and the computer both busted!')\n\n print()\n\n twenty_one_score_dictionary['Ties'] += 1\n\n elif user_value == computer_value:\n\n print('You tied with the computer!')\n\n print()\n\n twenty_one_score_dictionary['Ties'] += 1\n\n elif (user_value < 22) and (computer_value > 21):\n\n print('You win since the computer busted!')\n\n print()\n\n twenty_one_score_dictionary['Wins'] += 1\n\n elif (user_value < 22) and (computer_value < 22) and (user_value > computer_value):\n\n print('You won since you had a higher score than the computer!')\n\n print()\n\n twenty_one_score_dictionary['Wins'] += 1\n\n elif (user_value > 21) and (computer_value < 22):\n\n print('You lost since you busted!')\n\n print()\n\n twenty_one_score_dictionary['Losses'] += 1\n\n elif (user_value < 22) and (computer_value < 22) and (computer_value > user_value):\n\n print('You lost since the computer had a higher score than you!')\n\n print()\n\n twenty_one_score_dictionary['Losses'] += 
1\n\n        print('Here\\'s the score:')\n\n        print(twenty_one_score_dictionary)\n\n        print()\n\n        while True:\n\n            twenty_one_replay_input = input('Would you like to play again? (\"Yes\"/\"No\") ')\n\n            if twenty_one_replay_input.upper() == 'YES':\n\n                print('Okay, let\\'s go again!')\n\n                print()\n\n                break\n\n            elif twenty_one_replay_input.upper() == 'NO':\n\n                print('Okay, have a good day!')\n\n                print()\n\n                game_selector_hub()\n\n            else:\n\n                print('Please type either \"Yes\" or \"No\"!')\n\n                print()\n\n    twenty_one_rules()\n\n    while True:\n\n        twenty_one_gameplay_deck = twenty_one_deck_maker()\n\n        twenty_one_hand_values = twenty_one_gameplay(twenty_one_gameplay_deck)\n\n        twenty_one_score_dictionary['Games Played'] += 1\n\n        twenty_one_replay(twenty_one_hand_values[0], twenty_one_hand_values[1])\n\n########################################################################################################################\n\n\ndef tic_tac_toe_game():\n\n    tic_tac_toe_dictionary = {'Wins': 0, 'Losses': 0, 'Ties': 0, 'Games Played': 0}\n\n    tic_tac_toe_board_unseen = [' ', ' ', ' ',\n                                ' ', ' ', ' ',\n                                ' ', ' ', ' ']\n\n    def tic_tac_toe_game_rules():\n\n        print()\n\n        print('Welcome to Tic Tac Toe! Try to get your X\\'s to be three in a row!')\n\n        print()\n\n        print('Here is the board.')\n\n        print()\n\n        print('1 | 2 | 3')\n        print('----------')\n        print('4 | 5 | 6')\n        print('----------')\n        print('7 | 8 | 9')\n\n        print()\n\n        print('The numbers correspond to the places in which you, or the computer, can put an \"X\" or \"O\".')\n\n        print()\n\n        print('There will be a coin toss to see who goes first.')\n\n        print()\n\n    def tic_tac_toe_coin_toss():\n\n        tic_tac_toe_coin = random.randint(0, 1)\n\n        tic_tac_toe_user_coin = input('Do you pick heads or tails? (\"1\" for heads, \"0\" for tails) ')\n\n        if (tic_tac_toe_user_coin != '0') and (tic_tac_toe_user_coin != '1'):\n\n            print('Input either \"1\" for heads or \"0\" for tails!')\n\n            return tic_tac_toe_coin_toss()\n\n        elif int(tic_tac_toe_user_coin) != tic_tac_toe_coin:\n\n            print('The computer won the coin toss!')\n\n            tic_tac_toe_first_turn = 0\n\n        else:\n\n            print('You won the coin toss!')\n\n            tic_tac_toe_first_turn = 1\n\n        return tic_tac_toe_first_turn\n\n    def tic_tac_toe_gameplay_hub(tic_tac_toe_player_1):\n\n        if tic_tac_toe_player_1 == 0:\n\n            tic_tac_toe_computer_player_1()\n\n        elif tic_tac_toe_player_1 == 1:\n\n            tic_tac_toe_user_player_1()\n\n    def tic_tac_toe_computer_player_1():\n\n        tic_tac_toe_computer_turns = 5\n\n        print()\n\n        print('The computer gets the first move.')\n\n        print()\n\n        while True:\n\n            while True:\n\n                tic_tac_toe_computer_index = random.randint(0, 8)\n\n                if (tic_tac_toe_board_unseen[tic_tac_toe_computer_index] != 'O') and (tic_tac_toe_board_unseen[tic_tac_toe_computer_index] != 'X'):\n\n                    tic_tac_toe_board_unseen[tic_tac_toe_computer_index] = 'O'\n\n                    tic_tac_toe_computer_turns -= 1\n\n                    print('The computer placed its\\' \"O\"!')\n\n                    print(tic_tac_toe_board_unseen[0], ' | ', tic_tac_toe_board_unseen[1], ' | ',tic_tac_toe_board_unseen[2])\n                    print('--------------')\n                    print(tic_tac_toe_board_unseen[3], ' | ', tic_tac_toe_board_unseen[4], ' | ',tic_tac_toe_board_unseen[5])\n                    print('--------------')\n                    print(tic_tac_toe_board_unseen[6], ' | ', tic_tac_toe_board_unseen[7], ' | ',tic_tac_toe_board_unseen[8])\n\n                    print()\n\n                    break\n\n                else:\n\n                    continue\n\n            if tic_tac_toe_board_unseen[0] == 'O' and tic_tac_toe_board_unseen[1] == 'O' and tic_tac_toe_board_unseen[2] == 'O':\n\n                print('The computer got three in a row!')\n\n                tic_tac_toe_dictionary['Losses'] += 1\n\n                break\n\n            
elif tic_tac_toe_board_unseen[3] == 'O' and tic_tac_toe_board_unseen[4] == 'O' and tic_tac_toe_board_unseen[5] == 'O':\n\n print('The computer got three in a row!')\n\n tic_tac_toe_dictionary['Losses'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[6] == 'O' and tic_tac_toe_board_unseen[7] == 'O' and tic_tac_toe_board_unseen[8] == 'O':\n\n print('The computer got three in a row!')\n\n tic_tac_toe_dictionary['Losses'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[0] == 'O' and tic_tac_toe_board_unseen[3] == 'O' and tic_tac_toe_board_unseen[6] == 'O':\n\n print('The computer got three in a row!')\n\n tic_tac_toe_dictionary['Losses'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[1] == 'O' and tic_tac_toe_board_unseen[4] == 'O' and tic_tac_toe_board_unseen[7] == 'O':\n\n print('The computer got three in a row!')\n\n tic_tac_toe_dictionary['Losses'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[2] == 'O' and tic_tac_toe_board_unseen[5] == 'O' and tic_tac_toe_board_unseen[8] == 'O':\n\n print('The computer got three in a row!')\n\n tic_tac_toe_dictionary['Losses'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[0] == 'O' and tic_tac_toe_board_unseen[4] == 'O' and tic_tac_toe_board_unseen[8] == 'O':\n\n print('The computer got three in a row!')\n\n tic_tac_toe_dictionary['Losses'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[2] == 'O' and tic_tac_toe_board_unseen[4] == 'O' and tic_tac_toe_board_unseen[6] == 'O':\n\n print('The computer got three in a row!')\n\n tic_tac_toe_dictionary['Losses'] += 1\n\n break\n\n elif tic_tac_toe_computer_turns == 0:\n\n print('The game is a tie!')\n\n tic_tac_toe_dictionary['Ties'] += 1\n\n break\n\n while True:\n\n tic_tac_toe_user_index = input('Enter the position where you want to put your \"X\" (A number from 1 to 9)): ')\n\n if (tic_tac_toe_user_index != '1') and (tic_tac_toe_user_index != '2') and (tic_tac_toe_user_index != '3') and (tic_tac_toe_user_index != '4') and (tic_tac_toe_user_index != '5') and (tic_tac_toe_user_index != '6') and (tic_tac_toe_user_index != '7') and (tic_tac_toe_user_index != '8') and (tic_tac_toe_user_index != '9'):\n\n print('Please input a valid position!')\n\n elif tic_tac_toe_board_unseen[int(tic_tac_toe_user_index) - 1] == 'O':\n\n print('The computer already has an \"O\" in this position!')\n\n elif tic_tac_toe_board_unseen[int(tic_tac_toe_user_index) - 1] == 'X':\n\n print('You already have an \"X\" in this position!')\n\n else:\n\n tic_tac_toe_board_unseen[int(tic_tac_toe_user_index) - 1] = 'X'\n\n print('You placed your \"X\"!')\n\n print(tic_tac_toe_board_unseen[0], ' | ', tic_tac_toe_board_unseen[1], ' | ',tic_tac_toe_board_unseen[2])\n print('--------------')\n print(tic_tac_toe_board_unseen[3], ' | ', tic_tac_toe_board_unseen[4], ' | ',tic_tac_toe_board_unseen[5])\n print('--------------')\n print(tic_tac_toe_board_unseen[6], ' | ', tic_tac_toe_board_unseen[7], ' | ',tic_tac_toe_board_unseen[8])\n\n print()\n\n break\n\n if tic_tac_toe_board_unseen[0] == 'X' and tic_tac_toe_board_unseen[1] == 'X' and tic_tac_toe_board_unseen[2] == 'X':\n\n print('You got three in a row!')\n\n tic_tac_toe_dictionary['Wins'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[3] == 'X' and tic_tac_toe_board_unseen[4] == 'X' and tic_tac_toe_board_unseen[5] == 'X':\n\n print('You got three in a row!')\n\n tic_tac_toe_dictionary['Wins'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[6] == 'X' and tic_tac_toe_board_unseen[7] == 'X' and tic_tac_toe_board_unseen[8] == 'X':\n\n print('You got three in a row!')\n\n 
tic_tac_toe_dictionary['Wins'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[0] == 'X' and tic_tac_toe_board_unseen[3] == 'X' and tic_tac_toe_board_unseen[6] == 'X':\n\n print('You got three in a row!')\n\n tic_tac_toe_dictionary['Wins'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[1] == 'X' and tic_tac_toe_board_unseen[4] == 'X' and tic_tac_toe_board_unseen[7] == 'X':\n\n print('You got three in a row!')\n\n tic_tac_toe_dictionary['Wins'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[2] == 'X' and tic_tac_toe_board_unseen[5] == 'X' and tic_tac_toe_board_unseen[8] == 'X':\n\n print('You got three in a row!')\n\n tic_tac_toe_dictionary['Wins'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[0] == 'X' and tic_tac_toe_board_unseen[4] == 'X' and tic_tac_toe_board_unseen[8] == 'X':\n\n print('You got three in a row!')\n\n tic_tac_toe_dictionary['Wins'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[2] == 'X' and tic_tac_toe_board_unseen[4] == 'X' and tic_tac_toe_board_unseen[6] == 'X':\n\n print('You got three in a row!')\n\n tic_tac_toe_dictionary['Wins'] += 1\n\n break\n\n def tic_tac_toe_user_player_1():\n\n tic_tac_toe_user_turns = 5\n\n print()\n\n print('You get the first move.')\n\n print()\n\n while True:\n\n while True:\n\n tic_tac_toe_user_index = input('Enter the position where you want to put your \"X\" (A number from 1 to 9)): ')\n\n if (tic_tac_toe_user_index != '1') and (tic_tac_toe_user_index != '2') and (tic_tac_toe_user_index != '3') and (tic_tac_toe_user_index != '4') and (tic_tac_toe_user_index != '5') and (tic_tac_toe_user_index != '6') and (tic_tac_toe_user_index != '7') and (tic_tac_toe_user_index != '8') and (tic_tac_toe_user_index != '9'):\n\n print('Please input a valid position!')\n\n elif tic_tac_toe_board_unseen[int(tic_tac_toe_user_index) - 1] == 'O':\n\n print('The computer already has an \"O\" in this position!')\n\n elif tic_tac_toe_board_unseen[int(tic_tac_toe_user_index) - 1] == 'X':\n\n print('You already have an \"X\" in this position!')\n\n else:\n\n tic_tac_toe_board_unseen[int(tic_tac_toe_user_index) - 1] = 'X'\n\n tic_tac_toe_user_turns -= 1\n\n print('You placed your \"X\"!')\n\n print(tic_tac_toe_board_unseen[0], ' | ', tic_tac_toe_board_unseen[1], ' | ',tic_tac_toe_board_unseen[2])\n print('--------------')\n print(tic_tac_toe_board_unseen[3], ' | ', tic_tac_toe_board_unseen[4], ' | ',tic_tac_toe_board_unseen[5])\n print('--------------')\n print(tic_tac_toe_board_unseen[6], ' | ', tic_tac_toe_board_unseen[7], ' | ',tic_tac_toe_board_unseen[8])\n\n print()\n\n break\n\n if tic_tac_toe_board_unseen[0] == 'X' and tic_tac_toe_board_unseen[1] == 'X' and tic_tac_toe_board_unseen[2] == 'X':\n\n print('You got three in a row!')\n\n tic_tac_toe_dictionary['Wins'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[3] == 'X' and tic_tac_toe_board_unseen[4] == 'X' and tic_tac_toe_board_unseen[5] == 'X':\n\n print('You got three in a row!')\n\n tic_tac_toe_dictionary['Wins'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[6] == 'X' and tic_tac_toe_board_unseen[7] == 'X' and tic_tac_toe_board_unseen[8] == 'X':\n\n print('You got three in a row!')\n\n tic_tac_toe_dictionary['Wins'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[0] == 'X' and tic_tac_toe_board_unseen[3] == 'X' and tic_tac_toe_board_unseen[6] == 'X':\n\n print('You got three in a row!')\n\n tic_tac_toe_dictionary['Wins'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[1] == 'X' and tic_tac_toe_board_unseen[4] == 'X' and tic_tac_toe_board_unseen[7] == 'X':\n\n print('You got three in a 
row!')\n\n tic_tac_toe_dictionary['Wins'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[2] == 'X' and tic_tac_toe_board_unseen[5] == 'X' and tic_tac_toe_board_unseen[8] == 'X':\n\n print('You got three in a row!')\n\n tic_tac_toe_dictionary['Wins'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[0] == 'X' and tic_tac_toe_board_unseen[4] == 'X' and tic_tac_toe_board_unseen[8] == 'X':\n\n print('You got three in a row!')\n\n tic_tac_toe_dictionary['Wins'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[2] == 'X' and tic_tac_toe_board_unseen[4] == 'X' and tic_tac_toe_board_unseen[6] == 'X':\n\n print('You got three in a row!')\n\n tic_tac_toe_dictionary['Wins'] += 1\n\n break\n\n elif tic_tac_toe_user_turns == 0:\n\n print('The game is a tie!')\n\n tic_tac_toe_dictionary['Ties'] += 1\n\n break\n\n while True:\n\n tic_tac_toe_computer_index = random.randint(0, 8)\n\n if (tic_tac_toe_board_unseen[tic_tac_toe_computer_index] != 'O') and (tic_tac_toe_board_unseen[tic_tac_toe_computer_index] != 'X'):\n\n tic_tac_toe_board_unseen[tic_tac_toe_computer_index] = 'O'\n\n print('The computer placed its\\' \"O\"!')\n\n print(tic_tac_toe_board_unseen[0], ' | ', tic_tac_toe_board_unseen[1], ' | ',tic_tac_toe_board_unseen[2])\n print('--------------')\n print(tic_tac_toe_board_unseen[3], ' | ', tic_tac_toe_board_unseen[4], ' | ',tic_tac_toe_board_unseen[5])\n print('--------------')\n print(tic_tac_toe_board_unseen[6], ' | ', tic_tac_toe_board_unseen[7], ' | ',tic_tac_toe_board_unseen[8])\n\n print()\n\n break\n\n else:\n\n continue\n\n if tic_tac_toe_board_unseen[0] == 'O' and tic_tac_toe_board_unseen[1] == 'O' and tic_tac_toe_board_unseen[2] == 'O':\n\n print('The computer got three in a row!')\n\n tic_tac_toe_dictionary['Losses'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[3] == 'O' and tic_tac_toe_board_unseen[4] == 'O' and tic_tac_toe_board_unseen[5] == 'O':\n\n print('The computer got three in a row!')\n\n tic_tac_toe_dictionary['Losses'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[6] == 'O' and tic_tac_toe_board_unseen[7] == 'O' and tic_tac_toe_board_unseen[8] == 'O':\n\n print('The computer got three in a row!')\n\n tic_tac_toe_dictionary['Losses'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[0] == 'O' and tic_tac_toe_board_unseen[3] == 'O' and tic_tac_toe_board_unseen[6] == 'O':\n\n print('The computer got three in a row!')\n\n tic_tac_toe_dictionary['Losses'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[1] == 'O' and tic_tac_toe_board_unseen[4] == 'O' and tic_tac_toe_board_unseen[7] == 'O':\n\n print('The computer got three in a row!')\n\n tic_tac_toe_dictionary['Losses'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[2] == 'O' and tic_tac_toe_board_unseen[5] == 'O' and tic_tac_toe_board_unseen[8] == 'O':\n\n print('The computer got three in a row!')\n\n tic_tac_toe_dictionary['Losses'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[0] == 'O' and tic_tac_toe_board_unseen[4] == 'O' and tic_tac_toe_board_unseen[8] == 'O':\n\n print('The computer got three in a row!')\n\n tic_tac_toe_dictionary['Losses'] += 1\n\n break\n\n elif tic_tac_toe_board_unseen[2] == 'O' and tic_tac_toe_board_unseen[4] == 'O' and tic_tac_toe_board_unseen[6] == 'O':\n\n print('The computer got three in a row!')\n\n tic_tac_toe_dictionary['Losses'] += 1\n\n break\n\n def tic_tac_toe_replay():\n\n print()\n\n print('Here\\'s the scoreboard:')\n\n print(tic_tac_toe_dictionary)\n\n print()\n\n while True:\n\n tic_tac_toe_replay_input = input('Do you want to play again? 
(\"Yes\"/\"No\") ')\n\n            if tic_tac_toe_replay_input.upper() == 'YES':\n\n                print('Okay, let\\'s go again!')\n\n                tic_tac_toe_board_unseen[0] = ' '\n                tic_tac_toe_board_unseen[1] = ' '\n                tic_tac_toe_board_unseen[2] = ' '\n                tic_tac_toe_board_unseen[3] = ' '\n                tic_tac_toe_board_unseen[4] = ' '\n                tic_tac_toe_board_unseen[5] = ' '\n                tic_tac_toe_board_unseen[6] = ' '\n                tic_tac_toe_board_unseen[7] = ' '\n                tic_tac_toe_board_unseen[8] = ' '\n\n                print()\n\n                break\n\n            elif tic_tac_toe_replay_input.upper() == 'NO':\n\n                print('Okay, I\\'ll see you later!')\n\n                print()\n\n                game_selector_hub()\n\n            else:\n\n                print('Please input either \"Yes\" or \"No\"!')\n\n    tic_tac_toe_game_rules()\n\n    while True:\n\n        tic_tac_toe_first_play = tic_tac_toe_coin_toss()\n\n        tic_tac_toe_gameplay_hub(tic_tac_toe_first_play)\n\n        tic_tac_toe_dictionary['Games Played'] += 1\n\n        tic_tac_toe_replay()\n\n########################################################################################################################\n\n\ndef python_quiz_game():\n\n    python_quiz_dictionary = {'Lives': 3, 'Hints': 3, 'Last Section Completed': 0}\n\n    python_quiz_answers_list = ['A', 'B', 'C', 'D']\n\n    def python_quiz_rules():\n\n        print()\n\n        print('Welcome to the Python Quiz!')\n\n        print()\n\n        print('The quiz is broken up into three different sections, each with five questions. There is an easy, a medium and a hard section, each with questions of the corresponding difficulty.')\n\n        print('Within the quiz you only have three lives; if a question is missed you lose a life. Once you\\'re all out of lives, the quiz is over!!')\n\n        print('You are also given three hints; however, once you use all of them you can\\'t use them again!')\n\n        print('After each section there will be a bonus question, if you answer it correctly you will be given an extra life!')\n\n        print()\n\n        print('Good Luck!')\n\n        print()\n\n    def python_quiz_easy_sec():\n\n        print()\n\n        print('Section 1 - Difficulty Easy:')\n\n        print()\n        print()\n\n        print('1. What is not true of a variable in Python?')\n        print('A) It stores a value that you assign to it.')\n        print('B) Its name can start with a number.')\n        print('C) Its value can be overwritten/updated by reassigning it later on in a program.')\n        print('D) You can add variables together.')\n\n        while True:\n\n            python_quiz_question_1 = input('Enter \"A\", \"B\", \"C\", \"D\" or \"Hint\": ')\n\n            if python_quiz_question_1.upper() == 'HINT':\n\n                if python_quiz_dictionary['Hints'] > 0:\n\n                    print('Try to remember the rules for naming variables.')\n\n                    python_quiz_dictionary['Hints'] -= 1\n\n                else:\n\n                    print('You are out of hints!')\n\n            elif python_quiz_question_1.upper() not in python_quiz_answers_list:\n\n                print('Please input a valid answer!')\n\n            elif python_quiz_question_1.upper() != 'B':\n\n                print('That\\'s incorrect! The correct answer is \"B\"!')\n\n                python_quiz_dictionary['Lives'] -= 1\n\n                break\n\n            else:\n\n                print('That\\'s right!')\n\n                break\n\n        if python_quiz_dictionary['Lives'] == 0:\n            print('You ran out of lives!')\n\n            print()\n\n            python_quiz_replay()\n\n        print()\n        print()\n\n        print('2. What is the symbol that goes after calling a function?')\n        print('A) \"_\"')\n        print('B) \"#\"')\n        print('C) \"()\"')\n        print('D) \"*\"')\n\n        while True:\n\n            python_quiz_question_2 = input('Enter \"A\", \"B\", \"C\", \"D\" or \"Hint\": ')\n\n            if python_quiz_question_2.upper() == 'HINT':\n\n                if python_quiz_dictionary['Hints'] > 0:\n\n                    print('Look at python\\'s many functions such as \"print\" and \"input\". 
What do they have in common?')\n\n python_quiz_dictionary['Hints'] -= 1\n\n else:\n\n print('You are out of hints!')\n\n elif python_quiz_question_2.upper() not in python_quiz_answers_list:\n\n print('Please input a valid answer!')\n\n elif python_quiz_question_2.upper() != 'C':\n\n print('That\\'s incorrect! The correct answer is \"C\"!')\n\n python_quiz_dictionary['Lives'] -= 1\n\n break\n\n else:\n\n print('That\\'s right!')\n\n break\n\n if python_quiz_dictionary['Lives'] == 0:\n\n print('You ran out of lives!')\n\n print()\n\n python_quiz_replay()\n\n print()\n print()\n\n print('3. What is the output of this code?')\n print()\n print('a = 5')\n print('b = 10')\n print('print(a + b)')\n print()\n print('A) \"5\"')\n print('B) \"10\"')\n print('C) \"ab\"')\n print('D) \"15\"')\n\n while True:\n\n python_quiz_question_3 = input('Enter \"A\", \"B\", \"C\", \"D\" or \"Hint\": ')\n\n if python_quiz_question_3.upper() == 'HINT':\n\n if python_quiz_dictionary['Hints'] > 0:\n\n print('Remember that variables store the values that they are assigned.')\n\n python_quiz_dictionary['Hints'] -= 1\n\n else:\n\n print('You are out of hints!')\n\n elif python_quiz_question_3.upper() not in python_quiz_answers_list:\n\n print('Please input a valid answer!')\n\n elif python_quiz_question_3.upper() != 'D':\n\n print('That\\'s incorrect! The correct answer is \"D\"!')\n\n python_quiz_dictionary['Lives'] -= 1\n\n break\n\n else:\n\n print('That\\'s right!')\n\n break\n\n if python_quiz_dictionary['Lives'] == 0:\n\n print('You ran out of lives!')\n\n print()\n\n python_quiz_replay()\n\n print()\n print()\n\n print('4. Which of the following creates an empty list?')\n print('A) \"list = {}\"')\n print('B) \"list = []\"')\n print('C) \"list = ()\"')\n print('D) \"list = | |\"')\n\n while True:\n\n python_quiz_question_4 = input('Enter \"A\", \"B\", \"C\", \"D\" or \"Hint\": ')\n\n if python_quiz_question_4.upper() == 'HINT':\n\n if python_quiz_dictionary['Hints'] > 0:\n\n print('Within the answers \"A\", \"B\" and \"C\", there is a tuple, list and dictionary being made.')\n\n python_quiz_dictionary['Hints'] -= 1\n\n else:\n\n print('You are out of hints!')\n\n elif python_quiz_question_4.upper() not in python_quiz_answers_list:\n\n print('Please input a valid answer!')\n\n elif python_quiz_question_4.upper() != 'B':\n\n print('That\\'s incorrect! The correct answer is \"B\"!')\n\n python_quiz_dictionary['Lives'] -= 1\n\n break\n\n else:\n\n print('That\\'s right!')\n\n break\n\n if python_quiz_dictionary['Lives'] == 0:\n\n print('You ran out of lives!')\n\n print()\n\n python_quiz_replay()\n\n print()\n print()\n\n print('5. True or False, you can change the data type of a variable?')\n print('A) True')\n print('B) False')\n\n while True:\n\n python_quiz_question_5 = input('Enter \"A\", \"B\", \"C\", \"D\" or \"Hint\": ')\n\n if python_quiz_question_5.upper() == 'HINT':\n\n if python_quiz_dictionary['Hints'] > 0:\n\n print('Try to remember python\\'s functions for the specific data types.')\n\n python_quiz_dictionary['Hints'] -= 1\n\n else:\n\n print('You are out of hints!')\n\n elif python_quiz_question_5.upper() not in python_quiz_answers_list:\n\n print('Please input a valid answer!')\n\n elif python_quiz_question_5.upper() != 'A':\n\n print('That\\'s incorrect! 
The correct answer is \"A\"!')\n\n python_quiz_dictionary['Lives'] -= 1\n\n break\n\n else:\n\n print('That\\'s right!')\n\n break\n\n if python_quiz_dictionary['Lives'] == 0:\n\n print('You ran out of lives!')\n\n print()\n\n python_quiz_replay()\n\n print()\n\n print('Here\\'s what you ended the first section with:')\n\n python_quiz_dictionary['Last Section Completed'] += 1\n\n print(python_quiz_dictionary)\n\n print()\n\n print('I\\'ll also give you an extra hint for later!')\n\n python_quiz_dictionary['Hints'] += 1\n\n def python_quiz_medium_sec():\n\n print()\n print()\n\n print('Section 2 - Difficulty Medium:')\n\n print()\n print()\n\n print('6. What is the index of \"b\" in this list?')\n print('list = [a, b, c, d]')\n print('A) 0')\n print('B) 1')\n print('C) 2')\n print('D) 3')\n\n while True:\n\n python_quiz_question_6 = input('Enter \"A\", \"B\", \"C\", \"D\" or \"Hint\": ')\n\n if python_quiz_question_6.upper() == 'HINT':\n\n if python_quiz_dictionary['Hints'] > 0:\n\n print('Remember that when counting indexes you start at 0.')\n\n python_quiz_dictionary['Hints'] -= 1\n\n else:\n\n print('You are out of hints!')\n\n elif python_quiz_question_6.upper() not in python_quiz_answers_list:\n\n print('Please input a valid answer!')\n\n elif python_quiz_question_6.upper() != 'B':\n\n print('That\\'s incorrect! The correct answer is \"B\"!')\n\n python_quiz_dictionary['Lives'] -= 1\n\n break\n\n else:\n\n print('That\\'s right!')\n\n break\n\n if python_quiz_dictionary['Lives'] == 0:\n\n print('You ran out of lives!')\n\n print()\n\n python_quiz_replay()\n\n print()\n print()\n\n print('7. What is the statement used to end a loop?')\n print('A) \"continue\"')\n print('B) \"pass\"')\n print('C) \"break\"')\n print('D) \"exit\"')\n\n while True:\n\n python_quiz_question_7 = input('Enter \"A\", \"B\", \"C\", \"D\" or \"Hint\": ')\n\n if python_quiz_question_7.upper() == 'HINT':\n\n if python_quiz_dictionary['Hints'] > 0:\n\n print('The term \"exit\" applies to a function used to exit the program so that\\'s probably not it.')\n\n python_quiz_dictionary['Hints'] -= 1\n\n else:\n\n print('You are out of hints!')\n\n elif python_quiz_question_7.upper() not in python_quiz_answers_list:\n\n print('Please input a valid answer!')\n\n elif python_quiz_question_7.upper() != 'C':\n\n print('That\\'s incorrect! The correct answer is \"C\"!')\n\n python_quiz_dictionary['Lives'] -= 1\n\n break\n\n else:\n\n print('That\\'s right!')\n\n break\n\n if python_quiz_dictionary['Lives'] == 0:\n\n print('You ran out of lives!')\n\n print()\n\n python_quiz_replay()\n\n print()\n print()\n\n print('8. What is the main purpose of a for loop?')\n print('A) To loop through a program until a specific condition is met.')\n print('B) To iterate through something such as a string or list.')\n print('C) To loop through a program infinitely.')\n print('D) To serve as a place to store values for variables.')\n\n while True:\n\n python_quiz_question_8 = input('Enter \"A\", \"B\", \"C\", \"D\" or \"Hint\": ')\n\n if python_quiz_question_8.upper() == 'HINT':\n\n if python_quiz_dictionary['Hints'] > 0:\n\n print('Think of how a for loop works, try to read a program that uses one and see how it does so.')\n\n python_quiz_dictionary['Hints'] -= 1\n\n else:\n\n print('You are out of hints!')\n\n elif python_quiz_question_8.upper() not in python_quiz_answers_list:\n\n print('Please input a valid answer!')\n\n elif python_quiz_question_8.upper() != 'B':\n\n print('That\\'s incorrect! 
The correct answer is \"B\"!')\n\n python_quiz_dictionary['Lives'] -= 1\n\n break\n\n else:\n\n print('That\\'s right!')\n\n break\n\n if python_quiz_dictionary['Lives'] == 0:\n\n print('You ran out of lives!')\n\n print()\n\n python_quiz_replay()\n\n print()\n print()\n\n print('9. What does the following code output?')\n print()\n print('a = 5')\n print('b = 5')\n print('while True:')\n print('print(a + b)')\n print()\n print('A) \"10\"')\n print('B) \"5\"')\n print('C) \"ab\"')\n print('D) \"10\" an infinite number of times.')\n\n while True:\n\n python_quiz_question_9 = input('Enter \"A\", \"B\", \"C\", \"D\" or \"Hint\": ')\n\n if python_quiz_question_9.upper() == 'HINT':\n\n if python_quiz_dictionary['Hints'] > 0:\n\n print('Notice if the loop has any way of exiting.')\n\n python_quiz_dictionary['Hints'] -= 1\n\n else:\n\n print('You are out of hints!')\n\n elif python_quiz_question_9.upper() not in python_quiz_answers_list:\n\n print('Please input a valid answer!')\n\n elif python_quiz_question_9.upper() != 'D':\n\n print('That\\'s incorrect! The correct answer is \"D\"!')\n\n python_quiz_dictionary['Lives'] -= 1\n\n break\n\n else:\n\n print('That\\'s right!')\n\n break\n\n if python_quiz_dictionary['Lives'] == 0:\n\n print('You ran out of lives!')\n\n print()\n\n python_quiz_replay()\n\n print()\n print()\n\n print('10. Is there a difference between \"round(var, 2)\" and \":.2f\" when used in a program?')\n print('A) Yes, \"round(var, 2)\" rounds the variable to two decimals while the other does not.')\n print('B) No, they both do the same thing in terms of rounding the variable to two decimals.')\n print('C) Yes, \":.2f\" rounds to two decimals while the other does not.')\n print('D) No, they both add two zeros at the end of the variable value.')\n\n while True:\n\n python_quiz_question_10 = input('Enter \"A\", \"B\", \"C\", \"D\" or \"Hint\": ')\n\n if python_quiz_question_10.upper() == 'HINT':\n\n if python_quiz_dictionary['Hints'] > 0:\n\n print('The \"f\" in \":.2f\" is for \"float\", meaning a decimal number.')\n\n python_quiz_dictionary['Hints'] -= 1\n\n else:\n\n print('You are out of hints!')\n\n elif python_quiz_question_10.upper() not in python_quiz_answers_list:\n\n print('Please input a valid answer!')\n\n elif python_quiz_question_10.upper() != 'B':\n\n print('That\\'s incorrect! The correct answer is \"B\"!')\n\n python_quiz_dictionary['Lives'] -= 1\n\n break\n\n else:\n\n print('That\\'s right!')\n\n break\n\n if python_quiz_dictionary['Lives'] == 0:\n\n print('You ran out of lives!')\n\n print()\n\n python_quiz_replay()\n\n print()\n\n print('Here\\'s what you ended the second section with:')\n\n python_quiz_dictionary['Last Section Completed'] += 1\n\n print(python_quiz_dictionary)\n\n print()\n\n print('I\\'ll also give you an extra hint for later!')\n\n python_quiz_dictionary['Hints'] += 1\n\n def python_quiz_hard_sec():\n\n print('Section 3 - Difficulty Hard:')\n\n print()\n print()\n\n print('11. 
True or False, the replace function only replaces characters of a string.')\n print('A) True')\n print('B) False')\n\n while True:\n\n python_quiz_question_11 = input('Enter \"A\", \"B\", \"C\", \"D\" or \"Hint\": ')\n\n if python_quiz_question_11.upper() == 'HINT':\n\n if python_quiz_dictionary['Hints'] > 0:\n\n print('It is typically used to replace a specified phrase.')\n\n python_quiz_dictionary['Hints'] -= 1\n\n else:\n\n print('You are out of hints!')\n\n elif python_quiz_question_11.upper() not in python_quiz_answers_list:\n\n print('Please input a valid answer!')\n\n elif python_quiz_question_11.upper() != 'A':\n\n print('That\\'s incorrect! The correct answer is \"A\"!')\n\n python_quiz_dictionary['Lives'] -= 1\n\n break\n\n else:\n\n print('That\\'s right!')\n\n break\n\n if python_quiz_dictionary['Lives'] == 0:\n\n print('You ran out of lives!')\n\n print()\n\n python_quiz_replay()\n\n print()\n print()\n\n print('12. How would you access a value of a variable that you return from a function with it being the second returned value of the function?')\n print()\n print('Assume it was returned like this: return var1, var2, var3. And that it was stored in a variable called \"var\".')\n print()\n print('A) You would type the variable name \"var\".')\n print('B) You would type \"var2\"')\n print('C) You would type \"var[var2]\"')\n print('D) You would type \"var[1]\"')\n\n while True:\n\n python_quiz_question_12 = input('Enter \"A\", \"B\", \"C\", \"D\" or \"Hint\": ')\n\n if python_quiz_question_12.upper() == 'HINT':\n\n if python_quiz_dictionary['Hints'] > 0:\n\n print('Think about how indexing works when trying to access a specific value of an index.')\n\n python_quiz_dictionary['Hints'] -= 1\n\n else:\n\n print('You are out of hints!')\n\n elif python_quiz_question_12.upper() not in python_quiz_answers_list:\n\n print('Please input a valid answer!')\n\n elif python_quiz_question_12.upper() != 'D':\n\n print('That\\'s incorrect! The correct answer is \"D\"!')\n\n python_quiz_dictionary['Lives'] -= 1\n\n break\n\n else:\n\n print('That\\'s right!')\n\n break\n\n if python_quiz_dictionary['Lives'] == 0:\n\n print('You ran out of lives!')\n\n print()\n\n python_quiz_replay()\n\n print()\n print()\n\n print('13. How is it possible to have a loop in a program without using any loop statements?')\n print('A) By using recursive functions to keep calling the same function until a condition is met.')\n print('B) It isn\\'t possible to do loops without any loop statements.')\n print('C) You can by using only if statements to keep checking for a condition.')\n print('D) In order to do this you would have to import a specific library.')\n\n while True:\n\n python_quiz_question_13 = input('Enter \"A\", \"B\", \"C\", \"D\" or \"Hint\": ')\n\n if python_quiz_question_13.upper() == 'HINT':\n\n if python_quiz_dictionary['Hints'] > 0:\n\n print('Think of how a loop works in terms of doing the same thing over and over.')\n\n python_quiz_dictionary['Hints'] -= 1\n\n else:\n\n print('You are out of hints!')\n\n elif python_quiz_question_13.upper() not in python_quiz_answers_list:\n\n print('Please input a valid answer!')\n\n elif python_quiz_question_13.upper() != 'A':\n\n print('That\\'s incorrect! The correct answer is \"A\"!')\n\n python_quiz_dictionary['Lives'] -= 1\n\n break\n\n else:\n\n print('That\\'s right!')\n\n break\n\n if python_quiz_dictionary['Lives'] == 0:\n\n print('You ran out of lives!')\n\n print()\n\n python_quiz_replay()\n\n print()\n print()\n\n print('14. 
What is the difference between a tuple and a list?')\n print('A) You can modify a list, since it is mutable, while you cannot modify a tuple, since it is immutable.')\n print('B) You can modify a tuple, which is mutable, while you cannot modify a list, since it is immutable.')\n print('C) You can only use indexing with a list.')\n print('D) You can only use indexing with a tuple.')\n\n while True:\n\n python_quiz_question_14 = input('Enter \"A\", \"B\", \"C\", \"D\" or \"Hint\": ')\n\n if python_quiz_question_14.upper() == 'HINT':\n\n if python_quiz_dictionary['Hints'] > 0:\n\n print('What happens when you try to add something to a tuple compared to a list?')\n\n python_quiz_dictionary['Hints'] -= 1\n\n else:\n\n print('You are out of hints!')\n\n elif python_quiz_question_14.upper() not in python_quiz_answers_list:\n\n print('Please input a valid answer!')\n\n elif python_quiz_question_14.upper() != 'A':\n\n print('That\\'s incorrect! The correct answer is \"A\"!')\n\n python_quiz_dictionary['Lives'] -= 1\n\n break\n\n else:\n\n print('That\\'s right!')\n\n break\n\n if python_quiz_dictionary['Lives'] == 0:\n print('You ran out of lives!')\n\n print()\n\n python_quiz_replay()\n\n print()\n print()\n\n print('15. When importing a library such as random, what syntax needs to be used when attempting to use the specific function \"randint\" from the library?')\n print('A) No special syntax is needed since the library is already imported.')\n print('B) You would type the name of the library with the variable inside parenthesis. random(var)')\n print('C) You type it with the library name followed by a period then the function with the variable in parenthesis. (random.randint(var))')\n print('D) You would type it with the library name followed by a period then the function being used while storing the value in a variable. (var = random.randint(x, y))')\n\n while True:\n\n python_quiz_question_15 = input('Enter \"A\", \"B\", \"C\", \"D\" or \"Hint\": ')\n\n if python_quiz_question_15.upper() == 'HINT':\n\n if python_quiz_dictionary['Hints'] > 0:\n\n print('If you are generating a random number for a dice game for instance, where would you store it and how would you type it out?')\n\n python_quiz_dictionary['Hints'] -= 1\n\n else:\n\n print('You are out of hints!')\n\n elif python_quiz_question_15.upper() not in python_quiz_answers_list:\n\n print('Please input a valid answer!')\n\n elif python_quiz_question_15.upper() != 'D':\n\n print('That\\'s incorrect! The correct answer is \"D\"!')\n\n python_quiz_dictionary['Lives'] -= 1\n\n break\n\n else:\n\n print('That\\'s right!')\n\n break\n\n if python_quiz_dictionary['Lives'] == 0:\n\n print('You ran out of lives!')\n\n print()\n\n python_quiz_replay()\n\n print()\n\n print('Here\\'s what you ended the third section with:')\n\n python_quiz_dictionary['Last Section Completed'] += 1\n\n print(python_quiz_dictionary)\n\n print()\n\n def python_quiz_bonus_questions():\n\n print()\n\n if python_quiz_dictionary['Last Section Completed'] == 1:\n\n print('Welcome to the first bonus question!')\n\n print()\n\n print('What year was the Python language born in?')\n\n while True:\n\n python_bonus_1_answer = input('Enter the year: ')\n\n if (python_bonus_1_answer.isnumeric()) and (python_bonus_1_answer != '1991') and (len(python_bonus_1_answer) == 4):\n\n print('That\\'s incorrect! Good try though!')\n\n break\n\n elif python_bonus_1_answer == '1991':\n\n print('Great Job! 
You earned an extra life!')\n\n python_quiz_dictionary['Lives'] += 1\n\n break\n\n else:\n\n print('Type a valid year!')\n\n elif python_quiz_dictionary['Last Section Completed'] == 2:\n\n print('Here is the second bonus question!')\n\n print()\n\n print('What type of language is python?')\n\n print('A) Object-Oriented')\n print('B) Logic')\n print('C) Procedural')\n print('D) Scripting')\n\n while True:\n\n python_bonus_2_answer = input('Enter either \"A\", \"B\", \"C\" or \"D\": ')\n\n if python_bonus_2_answer.upper() not in python_quiz_answers_list:\n\n print('Enter a valid answer please!')\n\n elif python_bonus_2_answer.upper() == 'A':\n\n print('Great job! Here\\'s an extra life!')\n\n python_quiz_dictionary['Lives'] += 1\n\n break\n\n else:\n\n print('That\\'s incorrect!')\n\n break\n\n def python_quiz_replay():\n\n print()\n\n if python_quiz_dictionary['Lives'] == 5:\n\n print('Great Job! You didn\\'t lose any lives as well as got both bonuses right!')\n\n elif python_quiz_dictionary['Lives'] == 4:\n\n print('You finished with an extra life! Awesome!')\n\n elif python_quiz_dictionary['Lives'] == 3:\n\n print('You started with three lives and ended with three lives, great work!')\n\n elif python_quiz_dictionary['Lives'] == 2:\n\n print('You ended with two lives left! I would go and review a little bit.')\n\n elif python_quiz_dictionary['Lives'] == 1:\n\n print('You barely made it through the quiz! I would go and study if I were you!')\n\n elif python_quiz_dictionary['Lives'] == 0:\n\n print('You lost all of your lives! You need to go do some studying!')\n\n print()\n\n while True:\n\n python_quiz_play_again = input('Would like to try the quiz again? (\"Yes/\"No\") ')\n\n if python_quiz_play_again.upper() == 'YES':\n\n print('Cool, let\\'s go again!')\n\n python_quiz_dictionary['Lives'] = 3\n\n python_quiz_dictionary['Hints'] = 3\n\n python_quiz_dictionary['Last Section Completed'] = 0\n\n python_quiz_gameplay_loop()\n\n break\n\n elif python_quiz_play_again.upper() == 'NO':\n\n print('Okay, see you later!')\n\n print()\n\n game_selector_hub()\n\n else:\n\n print('Please input either a \"Yes\" or a \"No\"!')\n\n def python_quiz_gameplay_loop():\n\n while True:\n\n python_quiz_easy_sec()\n python_quiz_bonus_questions()\n python_quiz_medium_sec()\n python_quiz_bonus_questions()\n python_quiz_hard_sec()\n python_quiz_replay()\n\n python_quiz_rules()\n\n python_quiz_gameplay_loop()\n\n\n########################################################################################################################\n\ndef hang_man_game():\n\n hang_man_score = {'Wins': 0, 'Losses': 0, 'Games Played': 0}\n\n def hang_man_rules():\n\n print()\n\n print('Welcome to Hangman!')\n\n print()\n\n print('Try and guess the letters of a word to reveal more of the word, or guess the word if you know what it is!')\n print('If you guess the letter correctly, it will appear and nothing happens to the man.')\n print('However, if you guess incorrectly then the man gradually gets drawn to look like he\\'s hanging!')\n print('If that happens then you lose and the man dies!')\n print('So try and guess correctly and use your big brain!')\n print('Oh, and you also have 6 lives, broken into 7 stages!')\n print()\n input('Type any key to continue: ')\n print()\n print('Here\\'s the different stages of the man being drawn.')\n print()\n print('Stage 1:')\n print('It\\'s just the noose.')\n print(''\n ' __________\\n'\n ' | |\\n'\n ' |\\n'\n ' |\\n'\n ' |\\n'\n ' |\\n')\n print()\n print('Stage 2:')\n print('The head is 
added.')\n print(''\n ' __________\\n'\n ' | |\\n'\n ' O |\\n'\n ' |\\n'\n ' |\\n'\n ' |\\n')\n print()\n print('Stage 3:')\n print('The body is added.')\n print(''\n ' __________\\n'\n ' | |\\n'\n ' O |\\n'\n ' | |\\n'\n ' |\\n'\n ' |\\n')\n print()\n print('Stage 4:')\n print('The left arm is added.')\n print(''\n ' __________\\n'\n ' | |\\n'\n ' O |\\n'\n ' |\\ |\\n'\n ' |\\n'\n ' |\\n')\n print()\n print('Stage 5:')\n print('The right arm is added.')\n print(''\n ' __________\\n'\n ' | |\\n'\n ' O |\\n'\n ' /|\\ |\\n'\n ' |\\n'\n ' |\\n')\n print()\n print('Stage 6:')\n print('The left leg is added.')\n print(''\n ' __________\\n'\n ' | |\\n'\n ' O |\\n'\n ' /|\\ |\\n'\n ' \\ |\\n'\n ' |\\n')\n print()\n print('Stage 7:')\n print('The right leg is added. And the man dies :(')\n print(''\n ' __________\\n'\n ' | |\\n'\n ' O |\\n'\n ' /|\\ |\\n'\n ' / \\ |\\n'\n ' |\\n')\n print()\n input('Type any key to continue: ')\n print()\n print()\n print()\n\n print('Let\\'s get started!')\n\n def hang_man_word_generator():\n\n hang_man_word_list = ['python', 'computer', 'hacker', 'pancakes', 'apollo', 'programming', 'telephone', 'network', 'website', 'gameplay', 'airpods', 'apple', 'component']\n\n hang_man_word_choice = random.choice(hang_man_word_list)\n\n return hang_man_word_choice\n\n def hang_man_gameplay(hang_man_word):\n\n hang_man_lives = 6\n\n hang_man_word_length = len(hang_man_word)\n\n hang_man_current_word_list = []\n\n hang_man_gameplay_list = []\n\n hang_man_guessed_letters_list = []\n\n for letter in hang_man_word:\n\n hang_man_current_word_list.append(letter)\n\n while hang_man_word_length > 0:\n\n hang_man_gameplay_list.append('_')\n\n hang_man_word_length -= 1\n\n while True:\n\n if hang_man_lives == 6:\n\n print(''\n ' __________\\n'\n ' | |\\n'\n ' |\\n'\n ' |\\n'\n ' |\\n'\n ' |\\n')\n\n elif hang_man_lives == 5:\n\n print(''\n ' __________\\n'\n ' | |\\n'\n ' O |\\n'\n ' |\\n'\n ' |\\n'\n ' |\\n')\n\n elif hang_man_lives == 4:\n\n print(''\n ' __________\\n'\n ' | |\\n'\n ' O |\\n'\n ' | |\\n'\n ' |\\n'\n ' |\\n')\n\n elif hang_man_lives == 3:\n\n print(''\n ' __________\\n'\n ' | |\\n'\n ' O |\\n'\n ' |\\ |\\n'\n ' |\\n'\n ' |\\n')\n\n elif hang_man_lives == 2:\n\n print(''\n ' __________\\n'\n ' | |\\n'\n ' O |\\n'\n ' /|\\ |\\n'\n ' |\\n'\n ' |\\n')\n\n elif hang_man_lives == 1:\n\n print(''\n ' __________\\n'\n ' | |\\n'\n ' O |\\n'\n ' /|\\ |\\n'\n ' \\ |\\n'\n ' |\\n')\n\n else:\n\n print(''\n ' __________\\n'\n ' | |\\n'\n ' O |\\n'\n ' /|\\ |\\n'\n ' / \\ |\\n'\n ' |\\n')\n\n print('You ran out of lives!')\n print()\n print('The word was ' + hang_man_word + '.')\n\n hang_man_score['Losses'] += 1\n\n break\n\n print(*hang_man_gameplay_list)\n\n if '_' not in hang_man_gameplay_list:\n\n print('You guessed the word correctly!')\n\n hang_man_score['Wins'] += 1\n\n break\n\n print()\n print()\n\n print('Letters guessed:')\n\n print(*hang_man_guessed_letters_list)\n\n hang_man_user_word_guess = input('Guess a letter of the word, or guess the entire word: ')\n\n if hang_man_user_word_guess.lower() == hang_man_word:\n\n print('Congrats! 
That\\'s the right word!')\n\n hang_man_score['Wins'] += 1\n\n break\n\n elif hang_man_user_word_guess.lower() in hang_man_guessed_letters_list:\n\n print('You already guessed that!')\n\n print()\n\n elif len(hang_man_user_word_guess) == len(hang_man_word):\n\n print('Good guess, but that\\'s not the right word!')\n\n hang_man_lives -= 1\n\n print()\n\n elif not (hang_man_user_word_guess.lower().isalpha()) or (1 < len(hang_man_user_word_guess) < len(hang_man_word)) or (len(hang_man_user_word_guess) > len(hang_man_word)):\n\n print('This isn\\'t in the alphabet!')\n\n print()\n\n elif hang_man_user_word_guess.lower() in hang_man_current_word_list:\n\n print(hang_man_user_word_guess.upper(), 'is a letter of the word!')\n\n for index in (i for i, letter in enumerate(hang_man_current_word_list) if letter == hang_man_user_word_guess):\n\n hang_man_gameplay_list[index] = hang_man_user_word_guess\n\n hang_man_guessed_letters_list.append(hang_man_user_word_guess)\n\n else:\n\n print(hang_man_user_word_guess.upper(), 'isn\\'t one of the letters of the word!')\n\n print()\n\n hang_man_lives -= 1\n\n hang_man_guessed_letters_list.append(hang_man_user_word_guess)\n\n def hang_man_replay():\n\n print()\n\n print('Here\\'s the score:')\n\n print(hang_man_score)\n\n print()\n\n while True:\n\n hang_man_user_replay = input('Would you like to play again? (\"Yes\"/\"No\") ')\n\n if hang_man_user_replay.lower() == 'yes':\n\n print('Okay, let\\'s play again!')\n\n print()\n\n break\n\n elif hang_man_user_replay.lower() == 'no':\n\n print('Okay, thanks for playing!')\n\n print()\n\n game_selector_hub()\n\n else:\n\n print('Please type either \"Yes\" or \"No\"!')\n\n hang_man_rules()\n\n while True:\n\n hang_man_computer_word = hang_man_word_generator()\n\n hang_man_gameplay(hang_man_computer_word)\n\n hang_man_score['Games Played'] += 1\n\n hang_man_replay()\n\n\n########################################################################################################################\n\ndef go_fish_game():\n\n go_fish_match_dictionary = {'User Books': 0, 'Computer Books': 0}\n\n go_fish_score_dictionary = {'Wins': 0, 'Losses': 0, 'Ties': 0, 'Games Played': 0}\n\n def go_fish_rules():\n\n print()\n\n print('Welcome to Go Fish!')\n\n print()\n\n print('The deck will be shuffled a number of times specified by you and will be distributed between you and the computer.')\n\n print('Once you have four of the same card, you have a book!')\n\n print('When it\\'s your turn, ask the computer if they have a card you want. 
If they don\\'t then you draw from the deck.')\n\n print('The same applies when it is the computer\\'s turn as well.')\n\n print('When asking for a card, type in either \"Ace\", \"Jack\", \"Queen\", \"King\", \"2\", \"3\", etc.')\n\n print()\n\n print('The end goal is to try to have more books than the computer!')\n\n print('Good luck!')\n\n print()\n\n def go_fish_card_sorter():\n\n go_fish_entire_card_deck = []\n\n go_fish_cards_list_one = ['Ace', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'Jack', 'Queen', 'King']\n\n go_fish_cards_list_two = ['Ace', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'Jack', 'Queen', 'King']\n\n go_fish_cards_list_three = ['Ace', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'Jack', 'Queen', 'King']\n\n go_fish_cards_list_four = ['Ace', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'Jack', 'Queen', 'King']\n\n for card in go_fish_cards_list_one:\n\n go_fish_entire_card_deck.append(card)\n\n for card in go_fish_cards_list_two:\n\n go_fish_entire_card_deck.append(card)\n\n for card in go_fish_cards_list_three:\n\n go_fish_entire_card_deck.append(card)\n\n for card in go_fish_cards_list_four:\n\n go_fish_entire_card_deck.append(card)\n\n go_fish_times_shuffled = int(input('How many times do you want the deck to be shuffled? '))\n\n print('The deck will be shuffled {} time(s)!'.format(go_fish_times_shuffled))\n\n print()\n\n while go_fish_times_shuffled > 0:\n\n random.shuffle(go_fish_entire_card_deck)\n\n go_fish_times_shuffled -= 1\n\n go_fish_valid_requests = []\n\n for card in go_fish_cards_list_one:\n\n go_fish_valid_requests.append(card)\n\n for card in go_fish_cards_list_two:\n\n go_fish_valid_requests.append(card)\n\n for card in go_fish_cards_list_three:\n\n go_fish_valid_requests.append(card)\n\n for card in go_fish_cards_list_four:\n\n go_fish_valid_requests.append(card)\n\n return go_fish_entire_card_deck, go_fish_valid_requests\n\n def go_fish_card_dealer(total_deck):\n\n go_fish_card_sort_counter = 0\n\n go_fish_remaining_deck = []\n\n go_fish_computer_deck = []\n\n go_fish_user_deck = []\n\n while go_fish_card_sort_counter < 7:\n\n go_fish_computer_deck.append(total_deck[0])\n\n total_deck.remove(total_deck[0])\n\n go_fish_user_deck.append(total_deck[0])\n\n total_deck.remove(total_deck[0])\n\n go_fish_card_sort_counter += 1\n\n for card in total_deck:\n\n go_fish_remaining_deck.append(card)\n\n return go_fish_remaining_deck, go_fish_computer_deck, go_fish_user_deck\n\n def go_fish_gameplay(card_pile, computer_hand, user_hand, valid_requests):\n\n go_fish_computer_matches = []\n\n go_fish_user_matches = []\n\n # Checking For Computer Matches\n\n for card in computer_hand:\n\n go_fish_matches_counter = computer_hand.count(card)\n\n if go_fish_matches_counter > 3:\n\n go_fish_computer_matches.append(card)\n\n go_fish_match_dictionary['Computer Books'] += 0.25\n\n for card in go_fish_computer_matches:\n\n if card in computer_hand:\n\n computer_hand.remove(card)\n\n for card in go_fish_computer_matches:\n\n if card in valid_requests:\n\n valid_requests.remove(card)\n\n # Checking For User Matches\n\n for card in user_hand:\n\n go_fish_matches_counter = user_hand.count(card)\n\n if go_fish_matches_counter > 3:\n\n go_fish_user_matches.append(card)\n\n go_fish_match_dictionary['User Books'] += 0.25\n\n for card in go_fish_user_matches:\n\n if card in user_hand:\n\n user_hand.remove(card)\n\n for card in go_fish_user_matches:\n\n if card in valid_requests:\n\n valid_requests.remove(card)\n\n # Main Game Loop\n\n print('Starting Hands:')\n\n print('User 
Hand:')\n\n print(user_hand)\n\n print()\n\n print('Books:')\n\n print('User Books:')\n\n print(go_fish_user_matches)\n\n print()\n\n print('Computer Books:')\n\n print(go_fish_computer_matches)\n\n print()\n\n while True:\n\n if (len(user_hand) == 0) and (len(computer_hand) == 0) and (len(card_pile) == 0):\n\n print('All of the cards have been booked!')\n\n break\n\n # Getting The User's Card Request\n\n while True:\n\n if len(user_hand) == 0:\n\n print('You have no cards, so you draw!')\n\n print()\n\n user_hand.append(card_pile[0])\n\n card_pile.remove(card_pile[0])\n\n break\n\n else:\n\n go_fish_user_request = input('Which card(s) do you want to ask the computer for? ')\n\n print()\n\n if (go_fish_user_request.capitalize() in valid_requests) and (go_fish_user_request.capitalize() in computer_hand):\n\n print('The computer had that card! It was added to your hand!')\n\n print()\n\n for card in computer_hand:\n\n if card == go_fish_user_request.capitalize():\n\n user_hand.append(card)\n\n computer_hand = [i for i in computer_hand if i != go_fish_user_request.capitalize()]\n\n if len(user_hand) == 0:\n\n print('You are out of cards, so you draw a card!')\n\n user_hand.append(card_pile[0])\n\n card_pile.remove(card_pile[0])\n\n break\n\n elif (go_fish_user_request.capitalize() in valid_requests) and (go_fish_user_request.capitalize() not in computer_hand):\n\n print('Go Fish!')\n\n print('You drew a card!')\n\n print()\n\n user_hand.append(card_pile[0])\n\n card_pile.remove(card_pile[0])\n\n break\n\n elif (go_fish_user_request.capitalize() in go_fish_user_matches) or (go_fish_user_request.capitalize() in go_fish_computer_matches):\n\n print('This card already has it\\'s book made!')\n\n else:\n\n print('Please enter a valid card!')\n\n print()\n\n # Checking For User Matches\n\n for card in user_hand:\n\n go_fish_matches_counter = user_hand.count(card)\n\n if go_fish_matches_counter > 3:\n\n go_fish_user_matches.append(card)\n\n go_fish_match_dictionary['User Books'] += 0.25\n\n for card in go_fish_user_matches:\n\n if card in user_hand:\n\n user_hand.remove(card)\n\n for card in go_fish_user_matches:\n\n if card in valid_requests:\n\n valid_requests.remove(card)\n\n # Computer Turn\n\n print('Updated Hands:')\n\n print('User Hand:')\n\n print(user_hand)\n\n print()\n\n print('Books:')\n\n print('User Books:')\n\n print(go_fish_user_matches)\n\n print()\n\n print('Computer Books:')\n\n print(go_fish_computer_matches)\n\n print()\n\n if (len(user_hand) == 0) and (len(computer_hand) == 0) and (len(card_pile) == 0):\n\n print('All of the cards have been booked!')\n\n break\n\n if len(computer_hand) == 0:\n\n print('The computer has no cards in it\\'s hand, so it draws a card!')\n\n print()\n\n computer_hand.append(card_pile[0])\n\n card_pile.remove(card_pile[0])\n\n else:\n\n go_fish_computer_turn = random.choice(computer_hand)\n\n print('The computer asked for a ' + go_fish_computer_turn + '!')\n\n print()\n\n if go_fish_computer_turn in user_hand:\n\n print('The computer took your card(s)!')\n\n print()\n\n for card in user_hand:\n\n if card == go_fish_computer_turn:\n\n computer_hand.append(card)\n\n user_hand = [i for i in user_hand if i != go_fish_computer_turn]\n\n if len(computer_hand) == 0:\n\n print('The computer is out of cards, so it draws a card!')\n\n computer_hand.append(card_pile[0])\n\n card_pile.remove(card_pile[0])\n\n elif go_fish_computer_turn not in user_hand:\n\n print('The computer had to Go Fish!')\n\n print()\n\n computer_hand.append(card_pile[0])\n\n 
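# The drawn card leaves the pile so it cannot be dealt twice.\n                    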
card_pile.remove(card_pile[0])\n\n # Checking For Computer Matches\n\n for card in computer_hand:\n\n go_fish_matches_counter = computer_hand.count(card)\n\n if go_fish_matches_counter > 3:\n\n go_fish_computer_matches.append(card)\n\n go_fish_match_dictionary['Computer Books'] += 0.25\n\n for card in go_fish_computer_matches:\n\n if card in computer_hand:\n\n computer_hand.remove(card)\n\n for card in go_fish_computer_matches:\n\n if card in valid_requests:\n\n valid_requests.remove(card)\n\n print('Updated Hands:')\n\n print('User Hand:')\n\n print(user_hand)\n\n print()\n\n print('Books:')\n\n print('User Books:')\n\n print(go_fish_user_matches)\n\n print()\n\n print('Computer Books:')\n\n print(go_fish_computer_matches)\n\n print()\n\n print()\n\n print('Here\\'s the end of game statistics:')\n\n print()\n\n print('Books:')\n\n print('User Books:')\n\n print(go_fish_user_matches)\n\n print()\n\n print('Computer Books:')\n\n print(go_fish_computer_matches)\n\n print()\n\n print(go_fish_match_dictionary)\n\n print()\n\n if len(go_fish_user_matches) > len(go_fish_computer_matches):\n\n print('You had more books! So you won the game!')\n\n print()\n\n go_fish_score_dictionary['Wins'] += 1\n\n elif len(go_fish_user_matches) < len(go_fish_computer_matches):\n\n print('You had less books than the computer, so you lost!')\n\n print()\n\n go_fish_score_dictionary['Losses'] += 1\n\n else:\n\n print('You had the same number of books as the computer, so it\\'s a tie!')\n\n print()\n\n go_fish_score_dictionary['Ties'] += 1\n\n def go_fish_replay():\n\n print('Here\\'s the score:')\n\n print(go_fish_score_dictionary)\n\n while True:\n\n print()\n\n go_fish_replay_request = input('Would you like to play again/ (\"Yes\"/\"No\"): ')\n\n if go_fish_replay_request.upper() == 'YES':\n\n print('Okay! Let\\'s go!')\n\n break\n\n elif go_fish_replay_request.upper() == 'NO':\n\n print('Okay, bye!')\n\n game_selector_hub()\n\n else:\n\n print('Please type either \"Yes\" or \"No\"!')\n\n go_fish_rules()\n\n while True:\n\n go_fish_total_deck = go_fish_card_sorter()\n\n go_fish_gameplay_decks = go_fish_card_dealer(go_fish_total_deck[0])\n\n go_fish_gameplay(go_fish_gameplay_decks[0], go_fish_gameplay_decks[1], go_fish_gameplay_decks[2],go_fish_total_deck[1])\n\n go_fish_score_dictionary['Games Played'] += 1\n\n go_fish_replay()\n\n########################################################################################################################\n\n\ndef uno_game():\n\n uno_score = {'User Wins': 0, 'Computer Wins': 0}\n\n def uno_game_greeting():\n\n print()\n\n print('Welcome to Uno! Try to be the first player to get rid of all of the cards in your hand!')\n\n print()\n\n print('The standard Uno rules apply, meaning that you can only play the same color card or the same type of card such as the same number or symbol.')\n\n print('However, wild cards can be played whenever, regardless of which card is on the top of the discard pile.')\n\n print()\n\n print('The deck will be shuffled before the game begins and will also be reshuffled in the event that the draw pile runs out of cards.')\n\n print()\n\n print('IMPORTANT NOTE: When playing a card be sure to type it out EXACTLY as it is displayed in your hand. 
I haven\\'t implemented a way to check for different cases in the user input yet.')\n\n print()\n\n print('Good Luck and Have Fun!')\n\n print()\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n def uno_deck_maker():\n\n uno_zero_cards = ['Red_0', 'Blue_0', 'Green_0', 'Yellow_0']\n\n uno_red_number_cards = ['Red_1', 'Red_2', 'Red_3', 'Red_4', 'Red_5', 'Red_6', 'Red_7', 'Red_8', 'Red_9']\n\n uno_red_number_cards_2 = uno_red_number_cards\n\n uno_red_action_cards = ['Red_Skip', 'Red_Reverse', 'Red_Draw_2+']\n\n uno_red_action_cards_2 = uno_red_action_cards\n\n red_cards = uno_red_number_cards + uno_red_number_cards_2 + uno_red_action_cards + uno_red_action_cards_2\n\n blue_cards = []\n\n for card in red_cards:\n\n x = card.replace('Red', 'Blue')\n\n blue_cards.append(x)\n\n green_cards = []\n\n for card in red_cards:\n\n x = card.replace('Red', 'Green')\n\n green_cards.append(x)\n\n yellow_cards = []\n\n for card in red_cards:\n\n x = card.replace('Red', 'Yellow')\n\n yellow_cards.append(x)\n\n wild_cards = ['Wild_Card', 'Wild_Card', 'Wild_Card', 'Wild_Card', 'Wild_Draw_4', 'Wild_Draw_4', 'Wild_Draw_4','Wild_Draw_4']\n\n uno_total_deck = uno_zero_cards + red_cards + blue_cards + green_cards + yellow_cards + wild_cards\n\n return uno_total_deck\n\n def uno_deck_shuffler(deck):\n\n print()\n\n break_out_flag = False\n\n while True:\n\n if break_out_flag:\n\n break\n\n user_shuffle_input = input('Type \"0\" to pick how many times the deck is shuffled or type \"1\" to have it randomly shuffled. ')\n\n print()\n\n if user_shuffle_input.isdigit():\n\n if user_shuffle_input == '0':\n\n while True:\n\n user_shuffle_number = input('Enter how many times you want the deck shuffled: ')\n\n print()\n\n if user_shuffle_number.isdigit():\n\n times_shuffled = int(user_shuffle_number)\n\n print('The deck will be shuffled {} time(s)!'.format(times_shuffled))\n\n print()\n\n break_out_flag = True\n\n break\n\n else:\n\n print('Please enter an integer!')\n\n print()\n\n elif user_shuffle_input == '1':\n\n times_shuffled = random.randint(1, 10)\n\n print('The deck will be shuffled {} time(s)!'.format(times_shuffled))\n\n print()\n\n break\n\n else:\n\n print('You entered a number, but not a \"0\" or a \"1\".')\n\n print()\n\n else:\n\n print('Please type either a \"0\" or a \"1\"! 
You entered a word!')\n\n print()\n\n while times_shuffled > 0:\n\n random.shuffle(deck)\n\n times_shuffled -= 1\n\n print('The deck was shuffled!')\n\n print()\n\n print()\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n print()\n\n return deck\n\n def card_dealer(game_deck):\n\n game_deck_actual = game_deck\n\n uno_user_hand = []\n\n uno_computer_hand = []\n\n cards_dealt = 7\n\n while cards_dealt > 0:\n\n uno_user_hand.append(game_deck_actual[0])\n\n game_deck.remove(game_deck_actual[0])\n\n uno_computer_hand.append(game_deck_actual[0])\n\n game_deck.remove(game_deck_actual[0])\n\n cards_dealt -= 1\n\n return uno_user_hand, uno_computer_hand, game_deck_actual\n\n def gameplay_function(user_hand, computer_hand, draw_pile):\n\n discard_pile = []\n\n discard_pile.append(draw_pile[0])\n\n draw_pile.remove(draw_pile[0])\n\n print('Let\\'s toss a coin to see who goes first.')\n\n print()\n\n coin_toss_list = ['HEADS', 'TALES']\n\n coin_toss_value = random.choice(coin_toss_list)\n\n while True:\n\n user_coin_value = input('Type either \"heads\" or \"tales\": ')\n\n print()\n\n if user_coin_value.upper() not in coin_toss_list:\n\n print('You didn\\'t type \"heads\" or \"tales\"!')\n\n print()\n\n else:\n\n if user_coin_value.upper() == coin_toss_value:\n\n print('You won the coin toss!')\n\n print()\n\n user_turn = 1\n\n break\n\n else:\n\n print('The computer won the coin toss!')\n\n print()\n\n user_turn = 2\n\n break\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n while True:\n\n user_invalid_card = False\n\n computer_invalid_card = False\n\n user_card_chosen_flag = False\n\n if (len(user_hand) == 0) or (len(computer_hand) == 0):\n\n if len(user_hand) == 0:\n\n print('You\\'re out of cards!')\n\n uno_score['User Wins'] += 1\n\n break\n\n else:\n\n print('The computer is out of cards!')\n\n uno_score['Computer Wins'] += 1\n\n break\n\n if len(draw_pile) == 0:\n\n print('The draw pile is out of cards, we\\'ll reshuffle the cards in the discard pile and continue play.')\n\n print()\n\n for card in discard_pile:\n\n draw_pile.append(card)\n\n discard_pile.remove(card)\n\n shuffle_number = random.randint(1, 10)\n\n while shuffle_number > 0:\n\n random.shuffle(draw_pile)\n\n shuffle_number -= 1\n\n discard_pile.append(draw_pile[0])\n\n draw_pile.remove(draw_pile[0])\n\n if user_turn % 2 != 0:\n\n print('---------------------------------------------------------------------------------------------------')\n\n print()\n\n print('Computer\\'s Hand:')\n\n computer_hand_hidden = []\n\n computer_hand_length = len(computer_hand)\n\n while computer_hand_length > 0:\n\n computer_hand_hidden.append('X')\n\n computer_hand_length -= 1\n\n print(computer_hand_hidden)\n\n print()\n\n print('Discard Pile Top Card:')\n\n print(discard_pile[0])\n\n print()\n\n print('Number of Cards In Draw Pile:')\n\n print(len(draw_pile))\n\n print()\n\n print('Your Hand:')\n\n print(user_hand)\n\n print()\n\n print('It\\'s your turn!')\n\n print()\n\n if discard_pile[0][0] == 'W':\n\n while True:\n\n user_choice = input('Enter any card since there\\'s a wild card in the discard pile! 
')\n\n print()\n\n if user_choice not in user_hand:\n\n print('That is not a valid card to play!')\n\n print()\n\n else:\n\n user_card_chosen = user_choice\n\n discard_pile.insert(0, user_card_chosen)\n\n break\n\n else:\n\n user_choices = []\n\n for card in user_hand:\n\n if (card[0] == discard_pile[0][0]) or (card[0] == 'W') or (card[-1] == discard_pile[0][-1]):\n\n user_choices.append(card)\n\n if len(user_choices) == 0:\n\n print('You have no valid cards to play!')\n\n print('That means you draw from the deck!')\n\n print()\n\n user_hand.append(draw_pile[0])\n\n draw_pile.remove(draw_pile[0])\n\n user_turn += 1\n\n user_invalid_card = True\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n else:\n\n while True:\n\n user_choice = input('Enter the card that you want to play: ')\n\n print()\n\n if user_choice not in user_hand:\n\n print('That is not a valid card to play!')\n\n print()\n\n elif (user_choice[0] == discard_pile[0][0]) or (user_choice[0] == 'W') or (user_choice[-1] == discard_pile[0][-1]):\n\n user_card_chosen = user_choice\n\n user_choices.clear()\n\n user_card_chosen_flag = True\n\n break\n\n else:\n\n print('Type in a card with the matching color, the matching value or a wild card!')\n\n print()\n\n discard_pile.insert(0, user_card_chosen)\n\n if user_card_chosen_flag:\n\n for card in user_hand:\n\n if card == user_card_chosen:\n\n user_hand.remove(card)\n\n uno_action_cards = ['Wild_Card', 'Wild_Draw_4', 'Red_Skip', 'Red_Reverse', 'Red_Draw_2+', 'Blue_Skip',\n 'Blue_Reverse', 'Blue_Draw_2+', 'Green_Skip', 'Green_Reverse', 'Green_Draw_2+',\n 'Yellow_Skip', 'Yellow_Reverse', 'Yellow_Draw_2+']\n\n if user_invalid_card:\n\n print('Your turn is over!')\n\n print()\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n elif user_card_chosen in uno_action_cards:\n\n if '4' in user_card_chosen:\n\n print('You made the computer draw four cards!')\n\n print()\n\n draw_4_counter = 4\n\n while draw_4_counter > 0:\n\n computer_hand.append(draw_pile[0])\n\n draw_pile.pop(0)\n\n draw_4_counter -= 1\n\n color_picker = ['RED', 'BLUE', 'GREEN', 'YELLOW']\n\n while True:\n\n print()\n\n wild_card_color = input('Enter a color to make the wild card: ')\n\n print()\n\n if wild_card_color.upper() not in color_picker:\n\n print('Enter a valid color!')\n\n else:\n\n break\n\n if wild_card_color.upper() == 'RED':\n\n print('You changed the color to red!')\n\n print()\n\n discard_pile.insert(0, wild_card_color.upper())\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n elif wild_card_color.upper() == 'BLUE':\n\n print('You changed the color to blue!')\n\n print()\n\n discard_pile.insert(0, wild_card_color.upper())\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n elif wild_card_color.upper() == 'GREEN':\n\n print('You changed the color to green!')\n\n print()\n\n discard_pile.insert(0, wild_card_color.upper())\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n else:\n\n print('You changed the color to yellow!')\n\n print()\n\n discard_pile.insert(0, wild_card_color.upper())\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n elif '2' in user_card_chosen:\n\n print('You made the computer draw two cards!')\n\n print()\n\n draw_2_counter = 2\n\n while draw_2_counter > 0:\n\n computer_hand.append(draw_pile[0])\n\n draw_pile.pop(0)\n\n draw_2_counter -= 1\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n elif 'Reverse' in user_card_chosen:\n\n print('The order was reversed, so it\\'s your turn again!')\n\n print()\n\n input('Press \"Enter\" to 
continue: ')\n\n print()\n\n elif 'Skip' in user_card_chosen:\n\n print('You skipped the computer\\'s turn!')\n\n print()\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n else:\n\n print('You played a wild card!')\n\n print()\n\n user_turn += 1\n\n color_picker = ['RED', 'BLUE', 'GREEN', 'YELLOW']\n\n while True:\n\n print()\n\n wild_card_color = input('Enter a color to make the wild card: ')\n\n print()\n\n if wild_card_color.upper() not in color_picker:\n\n print('Enter a valid color!')\n\n else:\n\n break\n\n if wild_card_color.upper() == 'RED':\n\n print('You changed the color to red!')\n\n print()\n\n discard_pile.insert(0, wild_card_color.upper())\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n elif wild_card_color.upper() == 'BLUE':\n\n print('You changed the color to blue!')\n\n print()\n\n discard_pile.insert(0, wild_card_color.upper())\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n elif wild_card_color.upper() == 'GREEN':\n\n print('You changed the color to green!')\n\n print()\n\n discard_pile.insert(0, wild_card_color.upper())\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n else:\n\n print('You changed the color to yellow!')\n\n print()\n\n discard_pile.insert(0, wild_card_color.upper())\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n else:\n\n print('You played your card!')\n\n print()\n\n user_turn += 1\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n if len(user_hand) == 1:\n print('You have one card left, UNO!')\n\n print()\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n if (len(user_hand) == 0) or (len(computer_hand) == 0):\n\n if len(user_hand) == 0:\n\n print('You\\'re out of cards!')\n\n uno_score['User Wins'] += 1\n\n break\n\n else:\n\n print('The computer is out of cards!')\n\n uno_score['Computer Wins'] += 1\n\n break\n\n if len(draw_pile) == 0:\n\n print(\n 'The draw pile is out of cards, we\\'ll reshuffle the cards in the discard pile and continue play.')\n\n print()\n\n for card in discard_pile:\n\n draw_pile.append(card)\n\n discard_pile.remove(card)\n\n shuffle_number = random.randint(1, 10)\n\n while shuffle_number > 0:\n\n random.shuffle(draw_pile)\n\n shuffle_number -= 1\n\n discard_pile.append(draw_pile[0])\n\n draw_pile.remove(draw_pile[0])\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n if user_turn % 2 == 0:\n\n print('---------------------------------------------------------------------------------------------------')\n\n print()\n\n print('Computer\\'s Hand:')\n\n computer_hand_hidden = []\n\n computer_hand_length = len(computer_hand)\n\n while computer_hand_length > 0:\n\n computer_hand_hidden.append('X')\n\n computer_hand_length -= 1\n\n print(computer_hand_hidden)\n\n print()\n\n print('Discard Pile Top Card:')\n\n print(discard_pile[0])\n\n print()\n\n print('Number of Cards In Draw Pile:')\n\n print(len(draw_pile))\n\n print()\n\n print('Your Hand:')\n\n print(user_hand)\n\n print()\n\n print('It\\'s the computer\\'s turn!')\n\n print()\n\n computer_card_choices = []\n\n if discard_pile[0][0] == 'W':\n\n for card in computer_hand:\n\n computer_card_choices.append(card)\n\n else:\n\n for card in computer_hand:\n\n if (card[0] == discard_pile[0][0]) or (card[0] == 'W') or (card[-1] == discard_pile[0][-1]):\n\n computer_card_choices.append(card)\n\n if len(computer_card_choices) == 0:\n\n print('The computer doesn\\'t have a card to play so it draws from the deck!')\n\n print()\n\n computer_hand.append(draw_pile[0])\n\n draw_pile.remove(draw_pile[0])\n\n 
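# Keep the hidden-hand display in step with the size of the computer's real hand.\n                    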
computer_hand_hidden.append('X')\n\n user_turn += 1\n\n computer_invalid_card = True\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n else:\n\n computer_card_chosen = random.choice(computer_card_choices)\n\n computer_card_choices.clear()\n\n discard_pile.insert(0, computer_card_chosen)\n\n computer_hand_hidden.pop(0)\n\n for card in computer_hand:\n\n if card == computer_card_chosen:\n\n computer_hand.remove(card)\n\n uno_action_cards = ['Wild_Card', 'Wild_Draw_4', 'Red_Skip', 'Red_Reverse', 'Red_Draw_2+',\n 'Blue_Skip', 'Blue_Reverse', 'Blue_Draw_2+', 'Green_Skip', 'Green_Reverse',\n 'Green_Draw_2+', 'Yellow_Skip', 'Yellow_Reverse', 'Yellow_Draw_2+']\n\n if computer_invalid_card:\n\n print('The computer\\'s turn is over!')\n\n print()\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n elif computer_card_chosen in uno_action_cards:\n\n if '4' in computer_card_chosen:\n\n print('The computer made you draw four cards!')\n\n print()\n\n draw_4_counter = 4\n\n while draw_4_counter > 0:\n\n user_hand.append(draw_pile[0])\n\n draw_pile.pop(0)\n\n draw_4_counter -= 1\n\n color_picker = ['RED', 'BLUE', 'GREEN', 'YELLOW']\n\n wild_card_color = random.choice(color_picker)\n\n if wild_card_color == 'RED':\n\n print('The computer changed the color to red!')\n\n print()\n\n discard_pile.insert(0, wild_card_color)\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n elif wild_card_color == 'BLUE':\n\n print('The computer changed the color to blue!')\n\n print()\n\n discard_pile.insert(0, wild_card_color)\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n elif wild_card_color == 'GREEN':\n\n print('The computer changed the color to green!')\n\n print()\n\n discard_pile.insert(0, wild_card_color)\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n else:\n\n print('The computer changed the color to yellow!')\n\n print()\n\n discard_pile.insert(0, wild_card_color)\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n elif '2' in computer_card_chosen:\n\n print('The computer made you draw two cards!')\n\n print()\n\n draw_2_counter = 2\n\n while draw_2_counter > 0:\n\n user_hand.append(draw_pile[0])\n\n draw_pile.pop(0)\n\n draw_2_counter -= 1\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n elif 'Reverse' in computer_card_chosen:\n\n print('The order was reversed, so it\\'s the computer\\'s turn again!')\n\n print()\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n elif 'Skip' in computer_card_chosen:\n\n print('The computer skipped your turn!')\n\n print()\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n else:\n\n print('The computer played a wild card!')\n\n print()\n\n user_turn += 1\n\n color_picker = ['RED', 'BLUE', 'GREEN', 'YELLOW']\n\n wild_card_color = random.choice(color_picker)\n\n if wild_card_color == 'RED':\n\n print('The computer changed the color to red!')\n\n print()\n\n discard_pile.insert(0, wild_card_color)\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n elif wild_card_color == 'BLUE':\n\n print('The computer changed the color to blue!')\n\n print()\n\n discard_pile.insert(0, wild_card_color)\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n elif wild_card_color == 'GREEN':\n\n print('The computer changed the color to green!')\n\n print()\n\n discard_pile.insert(0, wild_card_color)\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n else:\n\n print('The computer changed the color to yellow!')\n\n print()\n\n discard_pile.insert(0, wild_card_color)\n\n input('Press \"Enter\" to continue: 
')\n\n print()\n\n else:\n\n print('The computer played its card!')\n\n print()\n\n user_turn += 1\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n if len(computer_hand) == 1:\n\n print('The computer has one card left, UNO!')\n\n print()\n\n input('Press \"Enter\" to continue: ')\n\n print()\n\n def uno_replay():\n\n print()\n\n print('Here\\'s the score:')\n\n print(uno_score)\n\n print()\n\n while True:\n\n print()\n\n uno_user_replay = input('Would you like to play again? (\"Yes\"/\"No\") ')\n\n if uno_user_replay.lower() == 'yes':\n\n print('Okay, let\\'s play again!')\n\n print()\n\n break\n\n elif uno_user_replay.lower() == 'no':\n\n print('Okay, thanks for playing!')\n\n game_selector_hub()\n\n else:\n\n print('Please type either \"Yes\" or \"No\"!')\n\n uno_game_greeting()\n\n while True:\n\n uno_completed_deck = uno_deck_maker()\n\n shuffled_deck = uno_deck_shuffler(uno_completed_deck)\n\n gameplay_decks = card_dealer(shuffled_deck)\n\n gameplay_function(gameplay_decks[0], gameplay_decks[1], gameplay_decks[2])\n\n uno_replay()\n\n#######################################################################################################################\n\n\ndef war_game():\n\n card_value_dictionary = {'Joker': 14, 'Ace': 13, 'King': 12, 'Queen': 11, 'Jack': 10, '10': 9, '9': 8, '8': 7,\n '7': 6, '6': 5, '5': 4, '4': 3, '3': 2, '2': 1}\n\n war_score = {'User Wins': 0, 'Computer Wins': 0, 'Games Played': 0}\n\n def war_rules():\n\n print()\n\n print('Welcome to War! Be the first player to collect all 54 cards!')\n\n print()\n\n print('Each player will compare their first card in their hand with each other. The player with the highest '\n 'value card wins and that player gets both cards put into their card pile.')\n\n print('If both cards are the same value then their is a WAR!')\n\n print('Once the WAR starts the two compared cards, along with the next three cards from each player\\'s hand '\n 'will be added to the WAR pile.')\n\n print('The two players will then compare their next card and the player with the highest value card wins the '\n 'WAR and gets all of the cards in the WAR pile.')\n\n print('In the event that a player does not have enough cards for a WAR, the other player wins automatically.')\n\n print('If both players do not have enough cards for a WAR, then the player with the most cards wins.')\n\n print('Lastly, if both players have the same amount of cards and cannot have a WAR, the last card in the '\n 'player hand is compared to the other player, with the highest value being the winner.')\n\n print('In the event of a tie in this case, the cards will all be shuffled and dealt out again.')\n\n print()\n\n print('Good luck!')\n\n print()\n\n input('Press \"enter\" to continue: ')\n\n print()\n\n def create_deck():\n\n created_deck = []\n\n counter = 4\n\n card_suite = ['Ace', 'King', 'Queen', 'Jack', '10', '9', '8', '7', '6', '5', '4', '3', '2']\n\n jokers = ['Joker', 'Joker']\n\n while counter > 0:\n\n for card in card_suite:\n\n created_deck.append(card)\n\n if len(created_deck) % 13 == 0:\n\n counter -= 1\n\n for card in jokers:\n\n created_deck.append(card)\n\n return created_deck\n\n def deck_shuffler(deck):\n\n shuffle_number = 0\n\n user_pick_shuffle = False\n\n random_shuffle = False\n\n while True:\n\n shuffle_choice = input('Type \"0\" to pick how many times the deck is shuffled or type \"1\" to have it '\n 'shuffled randomly: ')\n\n if shuffle_choice == '0':\n\n user_pick_shuffle = True\n\n break\n\n elif shuffle_choice == '1':\n\n random_shuffle = 
True\n\n break\n\n else:\n\n print('Please type either a \"0\" or a \"1\".')\n\n print()\n\n if user_pick_shuffle:\n\n while True:\n\n times_shuffled = input('Enter how many times to shuffle the deck: ')\n\n if times_shuffled.isdigit():\n\n if times_shuffled == '0':\n\n print('Type in a number greater than 1.')\n\n print()\n\n else:\n\n shuffle_number = int(times_shuffled)\n\n print('The deck will be shuffled {} time(s).'.format(shuffle_number))\n\n print()\n\n break\n\n else:\n\n print('Please enter a valid integer.')\n\n print()\n\n elif random_shuffle:\n\n shuffle_number = random.randint(1, 10)\n\n print('The deck will be shuffled {} time(s).'.format(shuffle_number))\n\n print()\n\n while shuffle_number > 0:\n\n random.shuffle(deck)\n\n shuffle_number -= 1\n\n input('Press \"enter\" to continue: ')\n\n print()\n\n return deck\n\n def deck_dealer(deck):\n\n user_hand = deck[0::2]\n\n computer_hand = deck[1::2]\n\n return user_hand, computer_hand\n\n def gameplay_function(user_hand, computer_hand):\n\n turn = 1\n\n user_card_pile = []\n\n computer_card_pile = []\n\n war_card_list = []\n\n war_card_shuffle = []\n\n while True:\n\n print('Turn: {}'.format(turn))\n\n print('-------------------------------------------------------------------------------------------------------')\n\n print()\n\n print('Computer Info:')\n\n print('Computer\\'s Current Card: {}'.format(computer_hand[0]))\n\n print('Number of Cards in Computer\\'s Pile: {}'.format(len(computer_card_pile)))\n\n print('Number of Cards in The Computer\\'s Hand: {}'.format(len(computer_hand)))\n\n print()\n\n print('User Info:')\n\n print('Your Current Card: {}'.format(user_hand[0]))\n\n print('Number of Cards in Your Pile: {}'.format(len(user_card_pile)))\n\n print('Number of Cards in Your Hand: {}'.format(len(user_hand)))\n\n print()\n\n print('-------------------------------------------------------------------------------------------------------')\n\n # input('Press enter to continue: ')\n\n print()\n\n if card_value_dictionary[computer_hand[0]] < card_value_dictionary[user_hand[0]]:\n\n print('Your card has a higher value, so you get both cards.')\n\n print()\n\n user_card_pile.append(user_hand[0])\n\n user_hand.remove(user_hand[0])\n\n user_card_pile.append(computer_hand[0])\n\n computer_hand.remove(computer_hand[0])\n\n turn += 1\n\n # input('Press enter to continue: ')\n\n print()\n\n elif card_value_dictionary[computer_hand[0]] > card_value_dictionary[user_hand[0]]:\n\n print('The computer\\'s card has a higher value, so it gets both cards.')\n\n print()\n\n computer_card_pile.append(user_hand[0])\n\n user_hand.remove(user_hand[0])\n\n computer_card_pile.append(computer_hand[0])\n\n computer_hand.remove(computer_hand[0])\n\n turn += 1\n\n # input('Press enter to continue: ')\n\n print()\n\n else:\n\n print('You and the computer have the same value card, so it\\'s WAR!')\n\n print()\n\n print('The cards that were compared, along with three other cards from each players hand, will be put '\n 'into the war pile for the winner to collect.')\n\n print()\n\n # input('Press enter to continue: ')\n\n print()\n\n while True:\n\n war_counter = 3\n\n war_card_list.append(user_hand[0])\n\n user_hand.remove(user_hand[0])\n\n war_card_list.append(computer_hand[0])\n\n computer_hand.remove(computer_hand[0])\n\n if len(user_hand) == 0:\n\n print('You ran out of cards!')\n\n print('The cards in your card pile will be put into your hand and shuffled a random amount of '\n 'times.')\n\n print()\n\n for card in user_card_pile:\n\n 
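# Move each banked card back into the active hand before reshuffling.\n                            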
user_hand.append(card)\n\n user_card_pile.clear()\n\n shuffle_number = random.randint(1, 10)\n\n print('Your new hand was shuffled {} time(s).'.format(shuffle_number))\n\n while shuffle_number > 0:\n\n random.shuffle(user_hand)\n\n shuffle_number -= 1\n\n print()\n\n # input('Press enter to continue: ')\n\n print()\n\n if len(computer_hand) == 0:\n\n print('The computer ran out of cards!')\n\n print('The cards in the computer\\'s card pile will be put into its hand and shuffled a random '\n 'amount of times.')\n\n print()\n\n for card in computer_card_pile:\n\n computer_hand.append(card)\n\n computer_card_pile.clear()\n\n shuffle_number = random.randint(1, 10)\n\n print('The computer\\'s new hand was shuffled {} time(s).'.format(shuffle_number))\n\n while shuffle_number > 0:\n\n random.shuffle(computer_hand)\n\n shuffle_number -= 1\n\n print()\n\n # input('Press enter to continue: ')\n\n print()\n\n if len(user_hand) < 4 and len(computer_hand) < 4:\n\n print('You and the computer do not have enough cards for a WAR!')\n\n print('So, I will determine who wins based on the number of cards in the hands.')\n\n print()\n\n if len(user_hand) < len(computer_hand):\n\n print('The computer has more cards, so it wins automatically.')\n\n print()\n\n for card in user_hand:\n\n computer_card_pile.append(card)\n\n user_hand.clear()\n\n for card in war_card_list:\n\n computer_card_pile.append(card)\n\n war_card_list.clear()\n\n for card in computer_hand:\n\n computer_card_pile.append(card)\n\n computer_hand.clear()\n\n break\n\n elif len(user_hand) > len(computer_hand):\n\n print('You have more cards than the computer, so you win automatically.')\n\n print()\n\n for card in computer_hand:\n\n user_card_pile.append(card)\n\n computer_hand.clear()\n\n for card in war_card_list:\n\n user_card_pile.append(card)\n\n war_card_list.clear()\n\n for card in user_hand:\n\n user_card_pile.append(card)\n\n user_hand.clear()\n\n break\n\n else:\n\n print(\n 'You and the computer have the same amount of cards, so we will compare the last cards.')\n\n print()\n\n if card_value_dictionary[user_hand[-1]] > card_value_dictionary[computer_hand[-1]]:\n\n print('Your last card has a higher value than the computer\\'s, so you win the WAR!')\n\n print()\n\n for card in computer_hand:\n\n user_card_pile.append(card)\n\n computer_hand.clear()\n\n for card in war_card_list:\n\n user_card_pile.append(card)\n\n war_card_list.clear()\n\n for card in user_hand:\n\n user_card_pile.append(card)\n\n user_hand.clear()\n\n break\n\n elif card_value_dictionary[user_hand[-1]] < card_value_dictionary[computer_hand[-1]]:\n\n print('The computer\\'s last card has a higher value, so it wins the WAR!')\n\n print()\n\n for card in user_hand:\n\n computer_card_pile.append(card)\n\n user_hand.clear()\n\n for card in war_card_list:\n\n computer_card_pile.append(card)\n\n war_card_list.clear()\n\n for card in computer_hand:\n\n computer_card_pile.append(card)\n\n computer_hand.clear()\n\n break\n\n else:\n\n print('Since the last cards also have the same value the cards will be shuffled and '\n 'redistributed.')\n\n print()\n\n for card in user_hand:\n\n war_card_shuffle.append(card)\n\n user_hand.clear()\n\n for card in computer_hand:\n\n war_card_shuffle.append(card)\n\n computer_hand.clear()\n\n for card in war_card_list:\n\n war_card_shuffle.append(card)\n\n war_card_list.clear()\n\n war_shuffle = random.randint(1, 10)\n\n print('The cards will be shuffled {} time(s).'.format(war_shuffle))\n\n print()\n\n while war_shuffle > 0:\n\n 
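# One random.shuffle pass per requested shuffle of the pooled war cards.\n                                    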
random.shuffle(war_card_shuffle)\n\n war_shuffle -= 1\n\n user_hand = war_card_shuffle[0::2]\n\n computer_hand = war_card_shuffle[1::2]\n\n war_card_shuffle.clear()\n\n break\n\n elif len(user_hand) < 4:\n\n print('You do not have enough cards for the war, so the computer wins automatically.')\n\n print()\n\n for card in user_hand:\n\n computer_card_pile.append(card)\n\n user_hand.clear()\n\n for card in war_card_list:\n\n computer_card_pile.append(card)\n\n war_card_list.clear()\n\n x = 3\n\n while x > 0:\n\n computer_card_pile.append(computer_hand[0])\n\n computer_hand.pop(0)\n\n x -= 1\n\n break\n\n elif len(computer_hand) < 4:\n\n print('The computer does not have enough cards for the war, so you win automatically.')\n\n print()\n\n for card in computer_hand:\n\n user_card_pile.append(card)\n\n computer_hand.clear()\n\n for card in war_card_list:\n\n user_card_pile.append(card)\n\n war_card_list.clear()\n\n x = 3\n\n while x > 0:\n\n user_card_pile.append(user_hand[0])\n\n user_hand.pop(0)\n\n x -= 1\n\n break\n\n else:\n\n while war_counter > 0:\n\n war_card_list.append(user_hand[0])\n\n user_hand.remove(user_hand[0])\n\n war_card_list.append(computer_hand[0])\n\n computer_hand.remove(computer_hand[0])\n\n war_counter -= 1\n\n print('User Current Card: {}'.format(user_hand[0]))\n\n print('Computer Current Card: {}'.format(computer_hand[0]))\n\n print()\n\n if card_value_dictionary[computer_hand[0]] < card_value_dictionary[user_hand[0]]:\n\n print('Your card has a higher value, so you win the WAR!')\n\n print()\n\n for card in war_card_list:\n\n user_card_pile.append(card)\n\n war_card_list.clear()\n\n user_card_pile.append(user_hand[0])\n\n user_hand.remove(user_hand[0])\n\n user_card_pile.append(computer_hand[0])\n\n computer_hand.remove(computer_hand[0])\n\n break\n\n elif card_value_dictionary[computer_hand[0]] > card_value_dictionary[user_hand[0]]:\n\n print('The computer\\'s card has a higher value, so it wins the WAR!')\n\n print()\n\n for card in war_card_list:\n\n computer_card_pile.append(card)\n\n war_card_list.clear()\n\n computer_card_pile.append(user_hand[0])\n\n user_hand.remove(user_hand[0])\n\n computer_card_pile.append(computer_hand[0])\n\n computer_hand.remove(computer_hand[0])\n\n break\n\n else:\n\n print('You and the computer both have the same value card, so there is still a WAR!')\n\n print()\n\n print('The compared cards, along with three other cards from each player\\'s hand, will be '\n 'put into the war pile.')\n\n print()\n\n turn += 1\n\n # input('Press enter to continue: ')\n\n print()\n\n if len(user_card_pile) + len(user_hand) == 54:\n\n print('You got all of the cards! You WIN!')\n\n print()\n\n war_score['User Wins'] += 1\n\n war_score['Games Played'] += 1\n\n break\n\n if len(computer_card_pile) + len(computer_hand) == 54:\n\n print('The computer got all of the cards! 
The computer WINS!')\n\n print()\n\n war_score['Computer Wins'] += 1\n\n war_score['Games Played'] += 1\n\n break\n\n if len(user_hand) == 0:\n\n print('You ran out of cards!')\n\n print('The cards in your card pile will be put into your hand and shuffled a random amount of times.')\n\n print()\n\n for card in user_card_pile:\n\n user_hand.append(card)\n\n user_card_pile.clear()\n\n shuffle_number = random.randint(1, 10)\n\n print('Your new hand was shuffled {} time(s).'.format(shuffle_number))\n\n while shuffle_number > 0:\n\n random.shuffle(user_hand)\n\n shuffle_number -= 1\n\n print()\n\n # input('Press enter to continue: ')\n\n print()\n\n if len(computer_hand) == 0:\n\n print('The computer ran out of cards!')\n\n print('The cards in the computer\\'s card pile will be put into its hand and shuffled a random amount '\n 'of times.')\n\n print()\n\n for card in computer_card_pile:\n\n computer_hand.append(card)\n\n computer_card_pile.clear()\n\n shuffle_number = random.randint(1, 10)\n\n print('The computer\\'s new hand was shuffled {} time(s).'.format(shuffle_number))\n\n while shuffle_number > 0:\n\n random.shuffle(computer_hand)\n\n shuffle_number -= 1\n\n print()\n\n # input('Press enter to continue: ')\n\n print()\n\n def war_replay():\n\n print('Here is the score: ')\n\n print(war_score)\n\n print()\n\n while True:\n\n replay = input('Would you like to play again? (\"Yes\"/\"No\"): ')\n\n if replay.upper() == 'YES':\n\n print('Okay, let\\'s go again!')\n\n x = 60\n\n while x > 0:\n\n print()\n\n x -= 1\n\n break\n\n elif replay.upper() == 'NO':\n\n print('Okay, good game!')\n\n print()\n\n game_selector_hub()\n\n break\n\n else:\n\n print('Please type either \"Yes\" or \"No\".')\n\n print()\n\n war_rules()\n\n while True:\n\n total_deck = create_deck()\n\n shuffled_deck = deck_shuffler(total_deck)\n\n player_decks = deck_dealer(shuffled_deck)\n\n gameplay_function(player_decks[0], player_decks[1])\n\n war_replay()\n\n#######################################################################################################################\n\n\ndef connect_four_game():\n\n connect_four_score = {'User Wins': 0, 'Computer Wins': 0, 'Cats': 0}\n\n def connect_four_greet():\n\n print()\n\n print('Welcome to Connect Four!')\n\n print('Try to get four of your chips in a row!')\n\n print()\n\n print('Place your chip by choosing which column (1-7) you want your chip to fall into.')\n\n print('Your chips are denoted by \"O\" while the computer\\'s chips are denoted by an \"X\".')\n\n print()\n\n def connect_four_coin_toss():\n\n coin_value = random.randint(0, 1)\n\n print('We\\'ll toss a coin to see who goes first.')\n\n while True:\n\n user_guess = input('Input either \"heads\" or \"tales\" for your guess: ')\n\n if user_guess.upper() == 'HEADS':\n\n user_guess_value = 0\n\n break\n\n elif user_guess.upper() == 'TALES':\n\n user_guess_value = 1\n\n break\n\n else:\n\n print('That input is not \"heads\" or \"tales\"!')\n\n print()\n\n if user_guess_value == coin_value:\n\n print('You won the coin toss! That means that you go first!')\n\n print()\n\n user_turn_value = 0\n\n else:\n\n print('Your guess was incorrect! 
That means that the computer goes first!')\n\n print()\n\n user_turn_value = 1\n\n return user_turn_value\n\n def board_maker():\n\n board = []\n\n board_counter = 42\n\n while board_counter > 0:\n\n board.append(' ')\n\n board_counter -= 1\n\n return board\n\n def connect_four_gameplay(player_turn, board):\n\n user_id = 'user'\n\n user_player_symbol = 'O'\n\n computer_player_symbol = 'X'\n\n computer_id = 'computer'\n\n game_winner = False\n\n while True:\n\n if player_turn % 2 == 0:\n\n while True:\n\n while True:\n\n user_choice = input('Pick which column you want to put your chip into: ')\n\n if user_choice.isdigit():\n\n if 1 <= int(user_choice) <= 7:\n\n user_choice_valid = int(user_choice)\n\n print()\n\n break\n\n else:\n\n print('Please enter a number that corresponds to one of the columns.')\n\n print()\n\n else:\n\n print('Please enter a number that corresponds to one of the columns.')\n\n print()\n\n board_updated = position_verifier(user_choice_valid, board, user_id)\n\n if board_updated[1]:\n\n print('This column is full, please pick another column to put your chip in.')\n\n print()\n\n else:\n\n break\n\n board = board_updated[0]\n\n board_rows = 6\n\n position_offset = 0\n\n print(' 1 2 3 4 5 6 7')\n\n print('-' * 29)\n\n while board_rows > 0:\n print('|', board[0 + position_offset], '|', board[1 + position_offset], '|',\n board[2 + position_offset], '|', board[3 + position_offset], '|',\n board[4 + position_offset], '|', board[5 + position_offset], '|', board[6 + position_offset],\n '|')\n\n print('-' * 29)\n\n board_rows -= 1\n\n position_offset += 7\n\n game_winner = check_win(board, user_player_symbol, game_winner)\n\n if game_winner:\n\n player_win = 1\n\n break\n\n cat_counter = 0\n\n for place in board:\n\n if place != ' ':\n\n cat_counter += 1\n\n if cat_counter == 42:\n\n player_win = 0\n\n break\n\n player_turn += 1\n\n print('Your chip was placed!')\n\n print()\n\n input('Press \"enter\" to continue: ')\n\n print()\n\n else:\n\n while True:\n\n computer_choice_valid = random.randint(1, 7)\n\n board_updated = position_verifier(computer_choice_valid, board, computer_id)\n\n if board_updated[1]:\n\n pass\n\n else:\n\n break\n\n board = board_updated[0]\n\n board_rows = 6\n\n position_offset = 0\n\n print(' 1 2 3 4 5 6 7')\n\n print('-' * 29)\n\n while board_rows > 0:\n print('|', board[0 + position_offset], '|', board[1 + position_offset], '|',\n board[2 + position_offset], '|', board[3 + position_offset], '|',\n board[4 + position_offset], '|', board[5 + position_offset], '|', board[6 + position_offset],\n '|')\n\n print('-' * 29)\n\n board_rows -= 1\n\n position_offset += 7\n\n game_winner = check_win(board, computer_player_symbol, game_winner)\n\n if game_winner:\n\n player_win = 2\n\n break\n\n cat_counter = 0\n\n for place in board:\n\n if place != ' ':\n\n cat_counter += 1\n\n if cat_counter == 42:\n\n player_win = 0\n\n break\n\n print('The computer placed it\\'s chip.')\n\n print()\n\n player_turn += 1\n\n input('Press \"enter\" to continue: ')\n\n print()\n\n return player_win\n\n def position_verifier(player_choice, player_board, player_id):\n\n column_full = False\n\n spot_counter = 6\n\n spot_offset = 0\n\n if player_choice == 1:\n\n while spot_counter > 0:\n\n if player_board[0] != ' ':\n\n column_full = True\n\n break\n\n if player_board[35 - spot_offset] == ' ':\n\n if player_id == 'user':\n\n player_board[35 - spot_offset] = 'O'\n\n break\n\n elif player_id == 'computer':\n\n player_board[35 - spot_offset] = 'X'\n\n break\n\n else:\n\n spot_offset += 
7\n\n spot_counter -= 1\n\n elif player_choice == 2:\n\n while spot_counter > 0:\n\n if player_board[1] != ' ':\n\n column_full = True\n\n break\n\n if player_board[36 - spot_offset] == ' ':\n\n if player_id == 'user':\n\n player_board[36 - spot_offset] = 'O'\n\n break\n\n elif player_id == 'computer':\n\n player_board[36 - spot_offset] = 'X'\n\n break\n\n else:\n\n spot_offset += 7\n\n spot_counter -= 1\n\n elif player_choice == 3:\n\n while spot_counter > 0:\n\n if player_board[2] != ' ':\n\n column_full = True\n\n break\n\n if player_board[37 - spot_offset] == ' ':\n\n if player_id == 'user':\n\n player_board[37 - spot_offset] = 'O'\n\n break\n\n elif player_id == 'computer':\n\n player_board[37 - spot_offset] = 'X'\n\n break\n\n else:\n\n spot_offset += 7\n\n spot_counter -= 1\n\n elif player_choice == 4:\n\n while spot_counter > 0:\n\n if player_board[3] != ' ':\n\n column_full = True\n\n break\n\n if player_board[38 - spot_offset] == ' ':\n\n if player_id == 'user':\n\n player_board[38 - spot_offset] = 'O'\n\n break\n\n elif player_id == 'computer':\n\n player_board[38 - spot_offset] = 'X'\n\n break\n\n else:\n\n spot_offset += 7\n\n spot_counter -= 1\n\n elif player_choice == 5:\n\n while spot_counter > 0:\n\n if player_board[4] != ' ':\n\n column_full = True\n\n break\n\n if player_board[39 - spot_offset] == ' ':\n\n if player_id == 'user':\n\n player_board[39 - spot_offset] = 'O'\n\n break\n\n elif player_id == 'computer':\n\n player_board[39 - spot_offset] = 'X'\n\n break\n\n else:\n\n spot_offset += 7\n\n spot_counter -= 1\n\n elif player_choice == 6:\n\n while spot_counter > 0:\n\n if player_board[5] != ' ':\n\n column_full = True\n\n break\n\n if player_board[40 - spot_offset] == ' ':\n\n if player_id == 'user':\n\n player_board[40 - spot_offset] = 'O'\n\n break\n\n elif player_id == 'computer':\n\n player_board[40 - spot_offset] = 'X'\n\n break\n\n else:\n\n spot_offset += 7\n\n spot_counter -= 1\n\n elif player_choice == 7:\n\n while spot_counter > 0:\n\n if player_board[6] != ' ':\n\n column_full = True\n\n break\n\n if player_board[41 - spot_offset] == ' ':\n\n if player_id == 'user':\n\n player_board[41 - spot_offset] = 'O'\n\n break\n\n elif player_id == 'computer':\n\n player_board[41 - spot_offset] = 'X'\n\n break\n\n else:\n\n spot_offset += 7\n\n spot_counter -= 1\n\n return player_board, column_full\n\n def check_win(board, player_symbol, winner_id):\n\n # Checking the rows\n\n row_counter = 6\n\n main_row_offset = 0\n\n while row_counter > 0:\n\n row_offset = 0\n\n while row_offset < 4:\n\n if (board[0 + main_row_offset + row_offset] == board[1 + main_row_offset + row_offset] ==\n board[2 + main_row_offset + row_offset] == board[3 + main_row_offset + row_offset] ==\n player_symbol):\n\n winner_id = True\n\n break\n\n else:\n\n row_offset += 1\n\n if winner_id:\n\n break\n\n else:\n\n main_row_offset += 7\n\n row_counter -= 1\n\n # Checking the columns\n\n column_counter = 7\n\n main_column_offset = 0\n\n while column_counter > 0:\n\n column_offset = 0\n\n while column_offset <= 14:\n\n if (board[0 + column_offset + main_column_offset] == board[7 + column_offset + main_column_offset] ==\n board[14 + column_offset + main_column_offset] == board[\n 21 + column_offset + main_column_offset] ==\n player_symbol):\n\n winner_id = True\n\n break\n\n else:\n\n column_offset += 7\n\n if winner_id:\n\n break\n\n else:\n\n main_column_offset += 1\n\n column_counter -= 1\n\n # Checking Diagonals #\n\n # Checking upper right diagonals\n\n diagonal_counter = 0\n\n while 
diagonal_counter <= 3:\n\n if (board[3 + diagonal_counter] == board[9 + diagonal_counter] == board[15 + diagonal_counter] ==\n board[21 + diagonal_counter] == player_symbol):\n\n winner_id = True\n\n break\n\n else:\n\n diagonal_counter += 1\n\n # Checking upper left diagonals\n\n diagonal_counter = 0\n\n while diagonal_counter <= 3:\n\n if (board[3 - diagonal_counter] == board[11 - diagonal_counter] == board[19 - diagonal_counter] ==\n board[27 - diagonal_counter] == player_symbol):\n\n winner_id = True\n\n break\n\n else:\n\n diagonal_counter += 1\n\n # Checking bottom right diagonals\n\n diagonal_counter = 0\n\n while diagonal_counter <= 3:\n\n if (board[35 + diagonal_counter] == board[29 + diagonal_counter] == board[23 + diagonal_counter] ==\n board[17 + diagonal_counter] == player_symbol):\n\n winner_id = True\n\n break\n\n else:\n\n diagonal_counter += 1\n\n # Checking bottom left diagonals\n\n diagonal_counter = 0\n\n while diagonal_counter <= 3:\n\n if (board[41 - diagonal_counter] == board[33 - diagonal_counter] == board[25 - diagonal_counter] ==\n board[17 - diagonal_counter] == player_symbol):\n\n winner_id = True\n\n break\n\n else:\n\n diagonal_counter += 1\n\n # Checking mid-right diagonals\n\n diagonal_counter = 0\n\n while diagonal_counter <= 3:\n\n if (board[28 + diagonal_counter] == board[22 + diagonal_counter] == board[16 + diagonal_counter] ==\n board[10 + diagonal_counter] == player_symbol):\n\n winner_id = True\n\n break\n\n else:\n\n diagonal_counter += 1\n\n # Checking mid-left diagonals\n\n diagonal_counter = 0\n\n while diagonal_counter <= 3:\n\n if (board[34 - diagonal_counter] == board[26 - diagonal_counter] == board[18 - diagonal_counter] ==\n board[10 - diagonal_counter] == player_symbol):\n\n winner_id = True\n\n break\n\n else:\n\n diagonal_counter += 1\n\n return winner_id\n\n def connect_four_replay(winner):\n\n if winner == 0:\n\n print('The game is a tie, so it\\'s a Cat!')\n\n print()\n\n connect_four_score['Cats'] += 1\n\n elif winner == 1:\n\n print('Congrats! You won the game!')\n\n print()\n\n connect_four_score['User Wins'] += 1\n\n elif winner == 2:\n\n print('The computer won the game!')\n\n print()\n\n connect_four_score['Computer Wins'] += 1\n\n print('Here is the score: ')\n\n print(connect_four_score)\n\n print()\n\n while True:\n\n replay = input('Would you like to play again? 
(\"Yes\"/\"No\"): ')\n\n if replay.upper() == 'YES':\n\n print('Okay, let\\'s play again!')\n\n x = 60\n\n while x > 0:\n\n print()\n\n x -= 1\n\n break\n\n elif replay.upper() == 'NO':\n\n print('Okay, good game!')\n\n print()\n\n game_selector_hub()\n\n else:\n\n print('Please type either \"Yes\" or \"No\".')\n\n print()\n\n connect_four_greet()\n\n while True:\n\n first_player = connect_four_coin_toss()\n\n game_board = board_maker()\n\n game_winner_number = connect_four_gameplay(first_player, game_board)\n\n connect_four_replay(game_winner_number)\n\n\ngame_selector_greeting()\n\ngame_selector_hub()\n","repo_name":"Hmurphy24/Game_Selector","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":147412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"25467584072","text":"from functools import partial\n\nfrom PyQt5.QtGui import QIcon, QCursor, QTextCursor\nfrom PyQt5.QtWidgets import QCheckBox, QPushButton, QTextEdit, QMenu, QLabel, \\\n QSpinBox, QToolButton, QWidget, QTabWidget, QGroupBox, QComboBox, \\\n QLineEdit, QHBoxLayout, QVBoxLayout\nfrom PyQt5.QtCore import Qt\n\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\n\nfrom ...GUI.ligne_commande import LigneCommande\nfrom ...GUI.qtlib import png\nfrom ...GUI.inspecteur import FenCode\nfrom ...GUI.menu import MenuBar\nfrom ...GUI.panel import Panel_simple\nfrom ...mathlib.interprete import Interprete\nfrom ...mathlib.parsers import latex2mathtext\nfrom ...mathlib.end_user_functions import __classement__\n\nfrom ...pylib import print_error, debug, no_argument, eval_safe\nfrom ... import param\n\n\nclass CalculatriceMenuBar(MenuBar):\n def __init__(self, panel):\n MenuBar.__init__(self, panel)\n self.ajouter(\"Fichier\", [\"Réinitialiser\",\n \"Réinitialiser la calculatrice.\", \"Ctrl+N\",\n self.panel.initialiser],\n [\"ouvrir\"], [\"enregistrer\"],\n [\"enregistrer_sous\"], ['session'], None, [\"quitter\"])\n self.ajouter(\"Affichage\", [\"onglet\"], [\"plein_ecran\"])\n for rubrique in __classement__:\n self.ajouter(rubrique, *(self.formater(contenu, rubrique != \"Symboles\") for contenu in __classement__[rubrique]))\n # pas de parenthese apres un symbole\n self.ajouter(\"Outils\",\n [\"Mémoriser le résultat\", \"Copie le resultat du calcul dans le presse-papier, afin de pouvoir l'utiliser ailleurs.\", \"Ctrl+M\", self.panel.vers_presse_papier],\n [\"options\"],\n )\n self.ajouter(\"Avancé\",\n [\"État interne de l'interprète\", \"État de l'interprète de commandes.\", \"Ctrl+H\", self.panel.EtatInterne],\n [\"ligne_commande\"],\n [\"debug\"],\n )\n self.ajouter(\"?\")\n\n\n def formater(self, contenu, parentheses = True):\n if contenu is None:\n return\n titre, nom, doc = contenu\n return [titre, doc, \"\", partial(self.panel.insere, nom=nom, parentheses=parentheses)]\n\n\n\nclass BoutonValider(QToolButton):\n\n modes = [('exact', 'résultats exacts'), ('approche', 'résultats approchés'),\n ('scientifique', 'résultats en écriture scientifique')]\n\n def __init__(self, parent):\n QToolButton.__init__(self)\n self.parent = parent\n self.setAutoRaise(True)\n self.mode_normal()\n\n self.menu = menu = QMenu(self)\n self.setMenu(menu)\n for mode, titre in self.modes:\n nom = 'mode_' + mode\n action = menu.addAction(QIcon(png(nom)), titre)\n action.setIconVisibleInMenu(True)\n action.triggered.connect(getattr(self, nom))\n self.clicked.connect(self.mode_occupe)\n\n\n def _set_icon(self, 
nom):\n pix = png(nom)\n self.setIcon(QIcon(pix))\n self.setIconSize(pix.size())\n\n def mode_exact(self, *args):\n self._set_icon('mode_exact_')\n self.parent.param(\"calcul_exact\", True)\n self.parent.param(\"ecriture_scientifique\", False)\n\n def mode_approche(self, *args):\n self._set_icon('mode_approche_')\n self.parent.param(\"calcul_exact\", False)\n self.parent.param(\"ecriture_scientifique\", False)\n\n def mode_scientifique(self, *args):\n self._set_icon('mode_scientifique_')\n self.parent.param(\"calcul_exact\", False)\n self.parent.param(\"ecriture_scientifique\", True)\n\n def mode_occupe(self):\n self._set_icon('thinking2_')\n\n def mode_normal(self):\n if self.parent.param(\"calcul_exact\"):\n self.mode_exact()\n elif self.parent.param(\"ecriture_scientifique\"):\n self.mode_scientifique()\n else:\n self.mode_approche()\n\n\n\nclass PaveNumerique(QWidget):\n def __init__(self, parent):\n QWidget.__init__(self, parent)\n self.parent = parent\n ### Pave numerique de la calculatrice ###\n # On construit le pavé de la calculatrice.\n # Chaque bouton du pavé doit provoquer l'insertion de la commande correspondante.\n\n self.pave = pave = QVBoxLayout()\n# pave.setSpacing(1)\n boutons = [\"2nde\", \"ans\", \"ouv\", \"ferm\", \"egal\", \"7\", \"8\", \"9\", \"div\", \"x\", \"4\", \"5\", \"6\", \"mul\", \"y\", \"1\", \"2\", \"3\", \"minus\", \"z\", \"0\", \"pt\", \"pow\", \"plus\", \"t\", \"rac\", \"sin\", \"cos\", \"tan\", \"exp\", \"i\", \"pi\", \"e\", \"abs\", \"mod\"]\n inserer = [\"\", \"ans()\", \"(\", \")\", \"=\", \"7\", \"8\", \"9\", \"/\", \"x\", \"4\", \"5\", \"6\", \"*\", \"y\", \"1\", \"2\", \"3\", \"-\", \"z\", \"0\", \".\", \"^\", \"+\", \"t\", \"sqrt(\", (\"sin(\", \"asin(\", \"sinus / arcsinus\"), (\"cos(\", \"acos(\", \"cosinus / arccosinus\"), (\"tan(\", \"atan(\", \"tangente / arctangente\"), (\"exp(\", \"ln(\", \"exponentielle / logarithme neperien\"), (\"i\", \"cbrt(\", \"i / racine cubique\"), (\"pi\", \"sinh(\", \"pi / sinus hyperbolique\"), (\"e\", \"cosh(\", \"e / cosinus hyperbolique\"), (\"abs(\", \"tanh(\", \"valeur absolue / tangente hyperbolique\"), (\" mod \", \"log10(\", \"modulo / logarithme decimal\")]\n\n self.seconde = False # indique si la touche 2nde est activee.\n\n self.actions = [self.touche_2nde]\n\n for i, nom_bouton in enumerate(boutons):\n # On aligne les boutons de la calculatrice par rangées de 5.\n if i%5 == 0:\n self.rangee = rangee = QHBoxLayout()\n rangee.addStretch(1)\n pave.addLayout(rangee)\n\n # Ensuite, on construit une liste de fonctions, parallèlement à la liste des boutons.\n if i > 0:\n self.actions.append(partial(self.action, commande=inserer[i]))\n\n bouton = QPushButton()\n pix = png('btn_' + nom_bouton)\n bouton.setIcon(QIcon(pix))\n bouton.setIconSize(pix.size())\n bouton.setFlat(True)\n# bouton.SetBackgroundColour(self.GetBackgroundColour())\n rangee.addWidget(bouton)\n if i%5 == 4:\n rangee.addStretch(2)\n # A chaque bouton, on associe une fonction de la liste.\n bouton.clicked.connect(self.actions[i])\n if type(inserer[i]) == tuple:\n bouton.setToolTip(inserer[i][2])\n\n self.setLayout(self.pave)\n\n def touche_2nde(self, event=None):\n self.seconde = not self.seconde\n if self.seconde:\n self.parent.message(\"Touche [2nde] activée.\")\n else:\n self.parent.message(\"\")\n\n def action(self, event=None, commande=''):\n entree = self.parent.entree\n if type(commande) == tuple:\n entree.insert(commande[self.seconde])\n else:\n entree.insert(commande)\n n = entree.cursorPosition()\n entree.setFocus()\n entree.setCursorPosition(n)\n 
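# La touche [2nde] ne vaut que pour une seule insertion : elle est désactivée aussitôt.\n        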
self.seconde = False\n self.parent.message(\"\")\n\n\n\nclass Options(QWidget):\n def __init__(self, parent):\n QWidget.__init__(self, parent)\n self.parent = parent\n prm = parent.param\n\n self.ensembles = ('R', 'C')\n\n ### Liste des options de la calculatrice ###\n self.pave = QVBoxLayout()\n\n # Chiffres significatifs\n box = QGroupBox(\"Mode calcul approché\")\n box_layout = QVBoxLayout()\n box.setLayout(box_layout)\n\n ligne = QHBoxLayout()\n box_layout.addLayout(ligne)\n ligne.addWidget(QLabel(\"Afficher \"))\n self.sc_precision_affichage = sc = QSpinBox(self)\n # param.precision_calcul = 60 par défaut\n sc.setRange(1, 50)\n sc.setValue(prm(\"precision_affichage\"))\n sc.valueChanged.connect(self.EvtPrecisionAffichage)\n ligne.addWidget(sc)\n ligne.addWidget(QLabel(\" chiffre(s) significatif(s).\"))\n ligne.addStretch()\n\n self.pave.addWidget(box)\n\n # Nombre de décimales\n box = QGroupBox(\"Mode écriture scientifique\")\n box_layout = QVBoxLayout()\n box.setLayout(box_layout)\n\n ligne = QHBoxLayout()\n box_layout.addLayout(ligne)\n ligne.addWidget(QLabel(\"Arrondir les résultats à \"))\n self.sc_decimales = sc = QSpinBox(self)\n sc.setRange(0, 11)\n sc.setValue(prm(\"ecriture_scientifique_decimales\"))\n sc.valueChanged.connect(self.EvtDecimales)\n ligne.addWidget(sc)\n ligne.addWidget(QLabel(\" décimale(s).\"))\n ligne.addStretch()\n\n self.pave.addWidget(box)\n\n box = QGroupBox(\"Copie Automatique\")\n box_layout = QVBoxLayout()\n box.setLayout(box_layout)\n # Copie du résultat dans le presse-papier\n ligne = QHBoxLayout()\n box_layout.addLayout(ligne)\n self.cb_copie_automatique = cb = QCheckBox(self)\n cb.setChecked(prm(\"copie_automatique\"))\n cb.stateChanged.connect(self.EvtCopieAutomatique)\n cb.setToolTip(\"Copier automatiquement le résultat du calcul dans le presse-papier.\")\n ligne.addWidget(cb)\n ligne.addWidget(QLabel(\"Copie du résultat dans le presse-papier.\"))\n ligne.addStretch()\n\n # En mode LaTeX\n ligne = QHBoxLayout()\n box_layout.addLayout(ligne)\n self.cb_copie_automatique_LaTeX = cb = QCheckBox(self)\n cb.setChecked(prm(\"copie_automatique_LaTeX\"))\n cb.setToolTip(\"Copier le résultat du calcul au format LaTeX dans le presse-papier.\")\n ligne.addWidget(cb)\n cb.stateChanged.connect(self.EvtCopieAutomatiqueLatex)\n self.st_copie_automatique_LaTeX = st = QLabel(\"Copie au format LaTeX (si possible).\")\n ligne.addWidget(st)\n ligne.addStretch()\n\n self.pave.addWidget(box)\n #~ self.pave.addStretch()\n\n box = QGroupBox(\"Ensemble de résolution\")\n box_layout = QVBoxLayout()\n box.setLayout(box_layout)\n ligne = QHBoxLayout()\n box_layout.addLayout(ligne)\n ligne.addWidget(QLabel('Résoudre et factoriser dans '))\n self.cb_ensemble = cb = QComboBox()\n ligne.addWidget(cb)\n cb.addItems(('R (réels)', 'C (complexes)'))\n cb.setCurrentIndex(self.ensembles.index(prm('ensemble')))\n cb.currentIndexChanged.connect(self.EvtEnsemble)\n self.pave.addWidget(box)\n self.pave.addStretch()\n\n self.setLayout(self.pave)\n # Pour (dés)activer la ligne \"Copie au format LaTeX\" au besoin.\n self.EvtCopieAutomatique()\n\n def EvtPrecisionAffichage(self, event=None):\n val = self.sc_precision_affichage.value()\n self.parent.param(\"precision_affichage\", val)\n\n\n def EvtDecimales(self, event=None):\n val = self.sc_decimales.value()\n self.parent.param(\"ecriture_scientifique_decimales\", val)\n\n\n def EvtCopieAutomatique(self, event=None):\n valeur = self.cb_copie_automatique.isChecked()\n self.parent.param(\"copie_automatique\", valeur)\n if valeur:\n 
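# La case LaTeX n'a de sens que lorsque la copie automatique est active : on l'active ou la grise en conséquence.\n            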
self.cb_copie_automatique_LaTeX.setEnabled(True)\n self.st_copie_automatique_LaTeX.setEnabled(True)\n else:\n self.cb_copie_automatique_LaTeX.setEnabled(False)\n self.st_copie_automatique_LaTeX.setEnabled(False)\n\n\n def EvtCopieAutomatiqueLatex(self, event=None):\n val = self.cb_copie_automatique_LaTeX.isChecked()\n self.parent.param(\"copie_automatique_LaTeX\", val)\n\n\n def EvtEnsemble(self, index):\n self.parent.param(\"ensemble\", self.ensembles[index])\n\n\n\nclass Avance(QWidget):\n def __init__(self, parent):\n QWidget.__init__(self, parent)\n self.parent = parent\n prm = parent.param\n\n\n ### Liste des options avancées de la calculatrice ###\n self.pave = QVBoxLayout()\n\n box = QGroupBox(\"Post-traitement\")\n box_layout = QVBoxLayout()\n box.setLayout(box_layout)\n box_layout.addWidget(QLabel(\"Traitement automatiquement du résultat :\"))\n self.traitement = QLineEdit()\n traitement = self.parent.param(\"appliquer_au_resultat\") or '_'\n self.traitement.setText(traitement)\n self.traitement.setMinimumWidth(100)\n self.traitement.setToolTip(\"Fonction ou opérations à appliquer automatiquement au résultat (représenté par _). Ex: 'factoriser(_)'.\")\n self.traitement.editingFinished.connect(self.EvtAppliquerResultat)\n box_layout.addWidget(self.traitement)\n self.pave.addWidget(box)\n self.setLayout(self.pave)\n self.pave.addStretch()\n\n\n def EvtAppliquerResultat(self, event=None):\n val = self.traitement.text().strip()\n if not val:\n self.traitement.setText('_')\n val = None\n elif val == '_':\n val = None\n self.parent.param(\"appliquer_au_resultat\", val)\n\n\n\n\nclass OngletsCalc(QTabWidget):\n def __init__(self, parent):\n QTabWidget.__init__(self, parent)\n self.addTab(PaveNumerique(parent), ' Pavé numérique ')\n self.addTab(Options(parent), 'Options')\n self.addTab(Avance(parent), 'Avancé')\n self.setTabPosition(QTabWidget.South)\n self.setStyleSheet(\"\"\"\n QTabBar::tab:selected {\n background: white;\n border: 1px solid #C4C4C3;\n border-top-color: white; /* same as the pane color */\n border-bottom-left-radius: 4px;\n border-bottom-right-radius: 4px;\n border-top-left-radius: 0px;\n border-top-right-radius: 0px;\n min-width: 8ex;\n padding: 7px;\n }\n QStackedWidget {background:white}\n QTabBar QToolButton {\n background:white;\n border: 1px solid #C4C4C3;\n border-top-color: white; /* same as the pane color */\n border-bottom-left-radius: 4px;\n border-bottom-right-radius: 4px;\n border-top-left-radius: 0px;\n border-top-right-radius: 0px;\n }\n \"\"\")\n\n\n\n\n\nclass Calculatrice(Panel_simple):\n titre = \"Calculatrice\" # Donner un titre a chaque module\n\n def __init__(self, *args, **kw):\n Panel_simple.__init__(self, *args, **kw)\n self.interprete = Interprete(calcul_exact = self.param(\"calcul_exact\"),\n ecriture_scientifique = self.param(\"ecriture_scientifique\"),\n formatage_OOo = self.param(\"formatage_OOo\"),\n formatage_LaTeX = self.param(\"formatage_LaTeX\"),\n ecriture_scientifique_decimales = self.param(\"ecriture_scientifique_decimales\"),\n precision_calcul = self.param(\"precision_calcul\"),\n precision_affichage = self.param(\"precision_affichage\"),\n appliquer_au_resultat = self.param('appliquer_au_resultat'),\n simpify = True,\n ensemble=self.param('ensemble'),\n )\n\n bouton = BoutonValider(self)\n bouton.setToolTip(\"Laissez appuyé pour changer de mode.\")\n self.entree = entree = LigneCommande(self, longueur=550,\n action=self.affichage_resultat, bouton=bouton)\n entree.setToolTip(\"[Maj]+[Entrée] pour une valeur approchée.\")\n 
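# Un clic droit sur la ligne de saisie ouvre le menu des fonctions mathématiques (voir EvtMenu).\n        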
self.entree.texte.setContextMenuPolicy(Qt.CustomContextMenu)\n self.entree.texte.customContextMenuRequested.connect(self.EvtMenu)\n\n\n self.sizer = sizer = QVBoxLayout()\n sizer.addWidget(entree)\n self.corps = corps = QHBoxLayout()\n sizer.addLayout(corps)\n self.resultats = resultats = QTextEdit(self)\n resultats.setMinimumSize(450, 310)\n resultats.setReadOnly(True)\n corps.addWidget(resultats, 1)\n onglets = OngletsCalc(self)\n corps.addWidget(onglets)\n onglets.setCurrentIndex(self.param('onglet'))\n onglets.currentChanged.connect(self.EvtCurrentChanged)\n\n self.figure = Figure(figsize=(5,1.3), frameon=True, facecolor=\"w\")\n self.visualisation = FigureCanvas(self.figure)\n self.axes = axes = self.figure.add_axes([0, 0, 1, 1], frameon=False)\n axes.axison = False\n self.pp_texte = axes.text(0.5, 0.5, \"\", horizontalalignment='center',\n verticalalignment='center', transform = axes.transAxes, size=18)\n self.visualisation.setContextMenuPolicy(Qt.CustomContextMenu)\n self.visualisation.customContextMenuRequested.connect(self.EvtMenuVisualisation)\n sizer.addWidget(self.visualisation)\n\n self.setLayout(self.sizer)\n self.initialiser()\n\n\n def activer(self):\n Panel_simple.activer(self)\n # Actions à effectuer lorsque l'onglet devient actif\n self.entree.setFocus()\n\n\n def _sauvegarder(self, fgeo):\n fgeo.contenu[\"Calculatrice\"] = [{}]\n fgeo.contenu[\"Calculatrice\"][0][\"Historique\"] = [repr(self.entree.historique)]\n fgeo.contenu[\"Calculatrice\"][0][\"Affichage\"] = [self.resultats.toPlainText()]\n fgeo.contenu[\"Calculatrice\"][0][\"Etat_interne\"] = [self.interprete.save_state()]\n\n\n\n def _ouvrir(self, fgeo):\n if \"Calculatrice\" in fgeo.contenu:\n calc = fgeo.contenu[\"Calculatrice\"][0]\n self.initialiser()\n\n self.entree.historique = eval_safe(calc[\"Historique\"][0])\n resultats = calc[\"Affichage\"][0]\n if resultats:\n resultats += '\\n\\n'\n self.resultats.setPlainText(resultats)\n self.resultats.moveCursor(QTextCursor.End)\n self.interprete.load_state(calc[\"Etat_interne\"][0])\n\n\n def modifier_pp_texte(self, chaine):\n \"\"\"Modifier le résultat affiché en LaTeX (pretty print).\"\"\"\n if self.param(\"latex\"):\n # On utilise directement LaTeX pour le rendu\n chaine = \"$\" + chaine + \"$\"\n else:\n # On utilise le parser matplotlib.mathtext, moins complet mais bien\n # plus rapide. 
Certaines adaptations doivent être faites.\n chaine = latex2mathtext(chaine)\n self.pp_texte.set_text(chaine)\n self.visualisation.draw()\n\n def vers_presse_papier(self, event = None, texte = None):\n if texte is None:\n texte = self.dernier_resultat\n Panel_simple.vers_presse_papier(texte)\n\n def copier_latex(self, event = None):\n self.vers_presse_papier(texte = self.interprete.latex_dernier_resultat.strip(\"$\"))\n\n def initialiser(self, event = None):\n self.dernier_resultat = \"\" # dernier resultat, sous forme de chaine formatee pour l'affichage\n self.entree.initialiser()\n self.interprete.initialiser()\n self.resultats.clear()\n\n def affichage_resultat(self, commande, **kw):\n # Commandes spéciales:\n if commande in ('clear', 'clear()', 'efface', 'efface()'):\n self.initialiser()\n self.modifier_pp_texte(\"Calculatrice réinitialisée.\")\n return\n\n self.modifie = True\n try:\n try:\n ##self.parent.parent.application.processEvents()\n if kw.get(\"shift\"):\n self.interprete.calcul_exact = False\n resultat, latex = self.interprete.evaluer(commande)\n if latex == \"$?$\": # provoque une erreur (matplotlib 0.99.1.1)\n latex = \"Désolé, je ne sais pas faire...\"\n finally:\n self.interprete.calcul_exact = self.param('calcul_exact')\n self.entree.bouton.mode_normal()\n aide = resultat.startswith(\"\\n== Aide sur \")\n if aide:\n latex = ''\n elif not latex:\n latex = resultat\n #LaTeX\n debug(\"Expression LaTeX: \" + latex)\n try:\n try:\n # Affichage en LaTeX si possible.\n self.modifier_pp_texte(latex)\n except Exception:\n print_error()\n # Sinon, affichage en texte simple.\n # `matplotlib.mathtext` est encore loin d'être\n # pleinement compatible avec LaTeX !\n self.modifier_pp_texte(resultat)\n except Exception:\n print_error()\n # Si tout a raté... 
mais ça ne devrait jamais arriver.\n self.modifier_pp_texte(\"\")\n #Presse-papier\n self.dernier_resultat = resultat\n if self.param(\"copie_automatique\"):\n if self.param(\"copie_automatique_LaTeX\"):\n self.copier_latex()\n else:\n self.vers_presse_papier()\n # TextCtrl\n numero = str(len(self.interprete.derniers_resultats))\n # Évite le décalage entre la première ligne et les suivantes (matrices)\n if \"\\n\" in resultat and not aide:\n resultat = \"\\n\" + \"\\n\".join(20*\" \" + ligne for ligne in resultat.split(\"\\n\"))\n self.resultats.moveCursor(QTextCursor.End)\n self.resultats.insertPlainText(\" Calcul n\\xb0\" + numero + \" : \"\n + commande + \"\\n Résultat :\"\n + \" \"*(4+len(numero))\n + resultat + \"\\n__________________\\n\\n\")\n self.resultats.moveCursor(QTextCursor.End)\n self.message(\"Calcul effectué.\" + self.interprete.warning)\n self.entree.clear()\n# self.resultats.setCursorPosition(len(self.resultats.plainText()))\n# self.resultats.setFocus()\n# self.resultats.ScrollLines(1)\n self.entree.setFocus()\n except Exception:\n self.message(\"Calcul impossible.\")\n self.entree.setFocus()\n if param.debug:\n raise\n\n\n def insere(self, event=None, nom='', parentheses=True):\n entree = self.entree\n deb, fin = entree.getSelection()\n if parentheses:\n entree.setCursorPosition(fin)\n entree.insert(\")\")\n entree.setCursorPosition(deb)\n entree.insert(nom + \"(\")\n entree.setFocus()\n if deb == fin:\n final = fin + len(nom) + 1\n else:\n final = fin + len(nom) + 2\n else:\n entree.insert(nom)\n final = fin + len(nom)\n entree.setFocus()\n entree.setCursorPosition(final)\n\n\n def EvtMenu(self, event):\n menu = QMenu()\n menu.setWindowTitle(\"Fonctions mathématiques\")\n debut = True\n for rubrique in __classement__:\n if not debut:\n menu.addSeparator()\n debut = False\n for titre, nom, doc in (_f for _f in __classement__[rubrique] if _f):\n action = menu.addAction(titre, partial(self.insere, nom=nom, parentheses=(rubrique != \"Symboles\")))\n # Pas de parenthèses après un symbole.\n action.setToolTip(doc)\n menu.exec_(QCursor.pos())\n\n\n def EvtMenuVisualisation(self, event):\n menu = QMenu()\n action = menu.addAction(\"Copier LaTeX\", self.copier_latex)\n action.setToolTip(\"Copier le code LaTeX dans le presse-papier.\")\n menu.exec_(QCursor.pos())\n\n def EvtCurrentChanged(self, index):\n self.param('onglet', index)\n\n def param(self, parametre, valeur = no_argument, defaut = False):\n if valeur is not no_argument:\n setattr(self.interprete, parametre, valeur)\n return Panel_simple.param(self, parametre = parametre, valeur = valeur, defaut = defaut)\n\n def EtatInterne(self, event):\n contenu = self.interprete.save_state()\n h = FenCode(self, \"État interne de l'interprète\", contenu, self.interprete.load_state)\n h.show()\n","repo_name":"wxgeo/geophar","sub_path":"wxgeometrie/modules/calculatrice/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":23897,"program_lang":"python","lang":"fr","doc_type":"code","stars":6,"dataset":"github-code","pt":"4"} +{"seq_id":"40949241457","text":"from hashlib import md5\r\n\r\nwith open(\"input/04.txt\") as f:\r\n input = f.read().strip()  # drop the trailing newline so the hashed key is exact\r\n\r\n\r\ndef hexhash(word):\r\n return md5(word.encode()).hexdigest()\r\n\r\n\r\nfor i in range(1, 99999999999):\r\n if hexhash(f\"{input}{i}\").startswith(\"00000\"):\r\n print(\"1:\", i)\r\n break\r\n\r\nfor i in range(1, 99999999999):\r\n if hexhash(f\"{input}{i}\").startswith(\"000000\"):\r\n print(\"2:\", i)\r\n 
break\n","repo_name":"don-patterson/advent-of-code","sub_path":"2015/day-04.py","file_name":"day-04.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"15868404999","text":"# Data source management\nfrom arkid.core import routers\nfrom . import permission_sync,scim_sync\n\nrouter = routers.FrontRouter(\n path='data_source',\n name='身份数据源',\n icon='data_source',\n children=[\n scim_sync.router,\n permission_sync.router,\n ]\n)","repo_name":"longguikeji/arkid","sub_path":"api/v1/pages/data_source_manage/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":1454,"dataset":"github-code","pt":"4"} +{"seq_id":"34203095364","text":"import pandas as pd\nfrom pandas.api.types import is_numeric_dtype\nimport seaborn\n\ndef check_data(data, features):\n if isinstance(data, pd.DataFrame) and isinstance(features, list):\n for word in features:\n if not isinstance(word, str):\n return 1\n return 0\n return 1\n\nclass MyPlotLib(object):\n\n @staticmethod\n def histogram(data, features):\n if check_data(data, features) == 1:\n print('HISTOGRAM: error data types')\n else:\n for word in list(features): # iterate over a copy: removing from the list while iterating over it skips items\n if not is_numeric_dtype(data[word]):\n print('HISTOGRAM: %s - error data types' %(word))\n features.remove(word)\n dfilter = data[features]\n dfilter.hist()\n\n @staticmethod\n def density(data, features):\n if check_data(data, features) == 1:\n print('DENSITY: error data types')\n else: \n for word in list(features):\n if not is_numeric_dtype(data[word]):\n print('DENSITY: %s - error data types' %(word))\n features.remove(word)\n dfilter = data[features]\n dfilter.plot.kde()\n \n @staticmethod\n def pair_plot(data, features):\n if check_data(data, features) == 1:\n print('PAIR_PLOT: error data types')\n else:\n for word in list(features):\n if not is_numeric_dtype(data[word]):\n print('PAIR PLOT: %s - error data types' %(word))\n features.remove(word)\n dfilter = data[features]\n seaborn.pairplot(dfilter)\n\n @staticmethod\n def box_plot(data, features): \n if check_data(data, features) == 1:\n print('BOX_PLOT: error data types')\n else:\n for word in list(features):\n if not is_numeric_dtype(data[word]):\n print('BOX PLOT: %s - error data types' %(word))\n features.remove(word)\n dfilter = data[features]\n dfilter.boxplot()","repo_name":"joann8/Piscine-Python","sub_path":"P04/ex07/MyPlotLib.py","file_name":"MyPlotLib.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"5826121276","text":"import pickle\n\ndict_1 = {1: 2, 3:4}\n\nwrite = open('dict_1.pickle', 'wb')\npickle.dump(dict_1, write)\nwrite.close()\n\nread = open('dict_1.pickle', 'rb')\nnew_dict = pickle.load(read)\nread.close()\n\nprint(new_dict[1])\n\ndict_2 = {'apple': 2, 'orange': 5}\n\nwrite = open('dict_2.pickle', 'wb')\npickle.dump(dict_2, write)\nwrite.close()\n\nread = open('dict_2.pickle', 'rb')\nnew_dict_2 = pickle.load(read)\nread.close()\n\nprint(new_dict_2['orange'])","repo_name":"SangwookCheon/WhaleCNN","sub_path":"pickle_practice.py","file_name":"pickle_practice.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"19037268100","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\n\nfrom django.shortcuts import render, 
redirect\nfrom spotify_client import scripts\nfrom django.urls import reverse\n\ndef index(r):\n return render(r, 'artists/artists.html')\n\n\ndef search_artists(r):\n form = {\n 'artist_name': r.GET.get('artist_name', ''),\n 'add_artist': r.GET.get('add_artist', ''),\n 'rm_artist': r.GET.get('rm_artist', ''),\n }\n\n if form['add_artist']:\n scripts.add_artist_to_user(form['add_artist'], r.user)\n return redirect(\"%s?artist_name=%s\" % (reverse('artists:search_artists'), form['artist_name']))\n\n if form['rm_artist']:\n scripts.delete_artist_from_user(form['rm_artist'], r.user)\n return redirect(\"%s?artist_name=%s\" % (reverse('artists:search_artists'), form['artist_name']))\n\n search_result = scripts.search_artist(r.user, form['artist_name'])\n\n return render(r, 'artists/search_artists.html', {'form': form, 'search_result': search_result})\n\n\ndef favorite_artists(r):\n form = {\n 'rm_artist': r.GET.get('rm_artist', ''),\n }\n\n if form['rm_artist']:\n scripts.delete_artist_from_user(form['rm_artist'], r.user)\n return redirect(reverse('artists:favorite_artists'))\n\n fav_artists = scripts.preferences_of_user(r.user)\n\n return render(r, 'artists/favorite_artists.html', {'favorite_artists': fav_artists})\n\n","repo_name":"palasonic1/music_app","sub_path":"music_news_feed/artists/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"24285097640","text":"# skip:start\nfrom typegraph import typegraph, t, Graph\nfrom typegraph.runtimes import HttpRuntime\n\n\n@typegraph()\ndef runtimes(g: Graph):\n # skip:end\n http = HttpRuntime(\"https://random.org/api\")\n\n # same func as above\n http.get(\n \"/flip_coin\", t.struct({}), t.enum([\"head\", \"tail\"])\n ) # implicitly attaches runtime to all types\n","repo_name":"metatypedev/metatype","sub_path":"website/docs/concepts/mental-model/runtimes.py","file_name":"runtimes.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"4"} +{"seq_id":"33236266259","text":"import json\nimport os\nimport re\nfrom collections import OrderedDict\n\nimport aiohttp\nimport cloudscraper\nimport requests\nfrom fastapi import APIRouter\nfrom urllib3 import Retry\n\nfrom models.account import RiotLogin\nfrom requests.adapters import HTTPAdapter, PoolManager\nfrom starlette.responses import JSONResponse, Response\nimport redis\n\n# Set up the Redis client\nredis_client = redis.Redis(host='localhost', port=6379, db=0)\nuser_sessions = {}\nAPI_TOKEN = os.getenv('HENRIK_API_TOKEN')\n\naccount = APIRouter(prefix=\"/account\", tags=[\"account\"])\n\ndef get_build():\n url = \"https://valorant-api.com/v1/version\"\n response = requests.get(url)\n\n if response.status_code == 200:\n data = response.json()\n riot_client_build = data[\"data\"][\"riotClientBuild\"]\n return riot_client_build\n else:\n raise Exception(f\"Failed to retrieve build information. 
Status code: {response.status_code}\")\n\nriot_client_build = get_build()\n\ndef create_sesh(retries=3, backoff_factor=0.3, status_forcelist=(500, 502, 504)):\n session = cloudscraper.create_scraper() # Create a CloudScraper session\n headers = OrderedDict({\n \"Accept-Language\": \"en-US,en;q=0.9\",\n \"Accept\": \"application/json, text/plain, */*\",\n 'User-Agent': f\"RiotClient/{riot_client_build} rso-auth (Windows; 10;;Professional, x64)\"\n })\n\n session.headers.update(headers) # Update the session headers\n\n retry = Retry(\n total=retries,\n backoff_factor=backoff_factor,\n status_forcelist=status_forcelist,\n method_whitelist=frozenset(['GET', 'POST', 'PUT']),\n )\n\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n\n return session\n\n@account.post(\"/login/riot\")\ndef checker(loginrequest : RiotLogin):\n successfulr1 = False\n while successfulr1 == False:\n session = create_sesh()\n\n data = {\n \"client_id\": \"play-valorant-web-prod\",\n \"nonce\": \"1\",\n \"redirect_uri\": \"https://playvalorant.com/opt_in\",\n \"response_type\": \"token id_token\",\n 'scope': 'account openid',\n }\n headers = {\n 'Content-Type': 'application/json',\n 'User-Agent': f\"RiotClient/{riot_client_build} rso-auth (Windows; 10;;Professional, x64)\",\n }\n r = session.post(f'https://auth.riotgames.com/api/v1/authorization', json=data, headers=headers)\n if \"html\" not in r.text:\n successfulr1 = True\n\n successfulr2 = False\n while successfulr2 == False:\n data = {\n 'type': 'auth',\n 'username': loginrequest.username,\n 'password': loginrequest.password\n }\n r2 = session.put('https://auth.riotgames.com/api/v1/authorization',\n json=data, headers=headers,\n cookies=r.cookies)\n\n if \"html\" not in r2.text:  # check the response just received, not the first one\n successfulr2 = True\n\n if \"auth_failure\" not in r2.text:\n\n if \"multifactor\" in r2.text:\n # Serialize the session and cookies and store them in Redis\n user_sessions_key = f\"user_sessions_{loginrequest.username}\"\n user_data = {\n 'headers': dict(session.headers),\n 'cookies': requests.utils.dict_from_cookiejar(r2.cookies)\n }\n redis_client.set(user_sessions_key, json.dumps(user_data))\n return JSONResponse(content={\"status\": \"2FA\", \"username\": loginrequest.username}, status_code=200)\n\n else:\n # print(r2.text)\n access_token = r2.json()['response']['parameters']['uri']\n access_token = access_token.split(\"#access_token=\")[1]\n access_token = re.split(r'&', access_token)[0]\n\n headers = {\n 'User-Agent': f\"RiotClient/{riot_client_build} rso-auth (Windows; 10;;Professional, x64)\",\n 'Authorization': f'Bearer {access_token}',\n }\n r4 = session.post('https://entitlements.auth.riotgames.com/api/token/v1',\n headers=headers,\n json={},\n cookies=r2.cookies)\n\n # print(r4.text)\n\n entitlement = r4.json()['entitlements_token']\n\n r5 = session.post('https://auth.riotgames.com/userinfo', headers=headers, json={})\n\n data = r5.json()\n puuid = data['sub']\n\n # print(f\"Access token: {access_token}\")\n # print(\"-\" * 50)\n # print(f\"Entitlements: {entitlement}\")\n # print(\"-\" * 50)\n # print(f\"Userid: {puuid}\")\n\n response_json = {\n \"status\": \"success\",\n \"puuid\": puuid,\n }\n\n return JSONResponse(status_code=200,\n content=response_json)\n\n else:\n return JSONResponse(status_code=400, content={\"status\": \"error\", \"message\": \"Incorrect Username/Password\"})\n#\n@account.post(\"/login/riot/2fa/{username}\")\nasync def login_2fa(username: str, code: str):\n user_sessions_key = 
f\"user_sessions_{username}\"\n user_data = redis_client.get(user_sessions_key)\n\n if user_data is None:\n return JSONResponse(status_code=400, content={\"status\": \"error\", \"message\": \"Username not found\"})\n\n\n # Deserialize the session and cookies\n user_data = json.loads(user_data)\n session = create_sesh()\n cookies = requests.utils.cookiejar_from_dict(user_data['cookies'])\n session.headers.update(user_data['headers'])\n\n successfulr3 = False\n while successfulr3 == False:\n\n data = {\n 'type': 'multifactor',\n 'code': code,\n 'rememberDevice': True\n }\n headers = {\n 'Content-Type': 'application/json',\n 'User-Agent': f\"RiotClient/{riot_client_build} rso-auth (Windows; 10;;Professional, x64)\",\n }\n r3 = session.put('https://auth.riotgames.com/api/v1/authorization',\n json=data, headers=headers,\n cookies=cookies)\n\n if not \"html\" in r3.text:\n successfulr3 = True\n\n if not \"multifactor_attempt_failed\" in r3.text:\n access_token = r3.json()['response']['parameters']['uri']\n access_token = access_token.split(\"#access_token=\")[1]\n access_token = re.split(r'&', access_token)[0]\n\n headers = {\n 'User-Agent': f\"RiotClient/{riot_client_build} rso-auth (Windows; 10;;Professional, x64)\",\n 'Authorization': f'Bearer {access_token}',\n }\n r4 = session.post('https://entitlements.auth.riotgames.com/api/token/v1',\n headers=headers,\n json={},\n cookies=r3.cookies)\n\n # print(r4.text)\n\n entitlement = r4.json()['entitlements_token']\n\n r5 = session.post('https://auth.riotgames.com/userinfo', headers=headers, json={})\n\n data = r5.json()\n puuid = data['sub']\n\n # print(f\"Accestoken: {access_token}\")\n # print(\"-\" * 50)\n # print(f\"Entitlements: {entitlement}\")\n # print(\"-\" * 50)\n # print(f\"Userid: {puuid}\")\n\n response_json = {\n \"status\": \"success\",\n \"puuid\": puuid,\n }\n\n redis_client.delete(user_sessions_key)\n return JSONResponse(status_code=200, content=response_json)\n\n else:\n # Serialize the session and cookies and store them in Redis\n user_data = {\n 'headers': dict(session.headers),\n 'cookies': requests.utils.dict_from_cookiejar(r3.cookies)\n }\n redis_client.set(user_sessions_key, json.dumps(user_data))\n return JSONResponse(status_code=400, content={\"status\": \"error\", \"message\": \"Incorrect 2FA code\"})\n\n\n@account.get(\"/get/puuid/{puuid}\")\nasync def get_account_details(puuid: str):\n async with aiohttp.ClientSession() as session:\n headers_henrik = {\n 'Authorization': f'{API_TOKEN}'\n }\n async with session.get(f'https://api.henrikdev.xyz/valorant/v1/by-puuid/account/{puuid}', headers=headers_henrik) as acc_details_response:\n acc_details_json = await acc_details_response.json()\n acc_region = acc_details_json['data']['region']\n acc_name = acc_details_json['data']['name']\n acc_tag = acc_details_json['data']['tag']\n\n async with session.get(f'https://api.henrikdev.xyz/valorant/v1/mmr/{acc_region}/{acc_name}/{acc_tag}', headers=headers_henrik) as rank_details_response:\n rank_details_json = await rank_details_response.json()\n\n if rank_details_json[\"data\"][\"currenttier\"] is None:\n account_json = {\n \"current_tier\": \"Unranked\",\n \"rank\": \"Unranked\",\n \"small_image_url\": None,\n \"large_image_url\": None,\n \"ranking_in_tier\": None,\n \"mmr_change_to_last_game\": None,\n \"elo\": 0.0,\n \"name_and_tag\": f\"{acc_name} #{acc_tag}\"\n }\n else:\n account_json = {\n \"current_tier\": rank_details_json[\"data\"][\"currenttier\"],\n \"rank\": rank_details_json[\"data\"][\"currenttierpatched\"],\n 
\"small_image_url\": rank_details_json[\"data\"][\"images\"][\"small\"],\n \"large_image_url\": rank_details_json[\"data\"][\"images\"][\"large\"],\n \"ranking_in_tier\": rank_details_json[\"data\"][\"ranking_in_tier\"],\n \"mmr_change_to_last_game\": rank_details_json[\"data\"][\"mmr_change_to_last_game\"],\n \"elo\": rank_details_json[\"data\"][\"elo\"],\n \"name_and_tag\": f\"{acc_name} #{acc_tag}\"\n }\n\n return account_json","repo_name":"naheedroomy/valorant-sl-leaderboard","sub_path":"routes/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":9758,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"70249179319","text":"from modules.Agent import Agent\nfrom modules.Game import Game\nimport json\n\n# Read metadata\nwith open('metadata.json', 'r') as f:\n metadata = json.load(f)\nlatest_generation_trained = metadata['latest_generation_trained']\n\ndef calculate_randomness(generation):\n if generation < 10:\n return 0.8, 0.2\n elif generation < 20:\n return 0.75, 0.25\n elif generation < 30:\n return 0.7, 0.3\n elif generation < 40:\n return 0.6, 0.4\n elif generation < 50:\n return 0.5, 0.4\n elif generation < 60:\n return 0.4, 0.4\n elif generation < 70:\n return 0.35, 0.35\n elif generation < 80:\n return 0.35, 0.3\n elif generation < 90:\n return 0.3, 0.3\n elif generation < 100:\n return 0.3, 0.25\n else:\n return 0.3, 0.2\n \nrandomness, probablilistic = calculate_randomness(latest_generation_trained)\n\n# Make players\nagent_1 = Agent( # current gen\n player_id=1,\n player_colour='white',\n model='models/model_{}.h5'.format(latest_generation_trained) if latest_generation_trained > 1 else None\n)\nagent_1.set_randomness_level(randomness)\nagent_1.set_probabilistic_level(probablilistic)\nagent_2 = Agent( # previous gen\n player_id=2,\n player_colour='red',\n model='models/model_{}.h5'.format(latest_generation_trained - 1) if latest_generation_trained > 1 else None\n)\nagent_2.set_randomness_level(randomness)\nagent_2.set_probabilistic_level(probablilistic)\n\nplayers = [agent_1, agent_2]\n\n# Make game\ngame = Game(\n players=players,\n print_board_state=True,\n print_board_delay=1,\n clear_screen_before_printing_board=True\n)\ngame.start()\nif game.winner:\n print('Player {} won'.format(game.winner))\n print('Q moves: {}'.format(game.move_number))","repo_name":"galarzafrancisco/ai_connect_4","sub_path":"play_against_the_machine.py","file_name":"play_against_the_machine.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"73016557236","text":"import tensorflow as tf\nimport numpy as np\nimport csv\nimport matplotlib.pyplot as plt\nfrom sklearn import preprocessing\nimport time\nimport wandb\nimport os\n\nwandb_use = True\nstart_time = time.time()\nif wandb_use == True:\n wandb.init(project=\"real_CNN_random_normalize\", tensorboard=False)\n\nclass Model:\n\n def __init__(self, sess, name):\n self.sess = sess\n self.name = name\n self._build_net()\n\n def _build_net(self):\n with tf.variable_scope(self.name):\n self.X = tf.placeholder(tf.float32, shape=[None, num_time_step*num_input], name = \"input\")\n self.X_input = tf.reshape(self.X, [-1, num_time_step, num_input, 1])\n self.Y = tf.placeholder(tf.int64, shape=[None, num_output], name= \"output\")\n self.keep_prob = tf.placeholder(tf.float32, name=\"keep_prob\")\n self.is_train = tf.placeholder(tf.bool, name=\"is_train\")\n self.hidden_layers = 0\n 
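# Architecture: 5 conv blocks (32/32/64/64/128 filters, 3x3 kernels, batch norm + dropout) followed by 2 dense layers of hidden_neurons units each.\n            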
self.hidden_neurons = 20\n self.regularizer = tf.contrib.layers.l2_regularizer(scale=regul_factor)\n\n L1 = tf.layers.conv2d(inputs= self.X_input, filters= 32, kernel_size= [3,3], padding=\"SAME\", activation=tf.nn.relu, kernel_regularizer=self.regularizer)\n L1 = tf.layers.batch_normalization(L1, training=self.is_train)\n L1 = tf.layers.dropout(L1, rate=1-self.keep_prob, training=self.is_train)\n\n L2 = tf.layers.conv2d(inputs= L1, filters= 32, kernel_size= [3,3],padding=\"SAME\", activation=tf.nn.relu, kernel_regularizer=self.regularizer)\n L2 = tf.layers.batch_normalization(L2, training=self.is_train)\n L2 = tf.layers.dropout(L2, rate=1-self.keep_prob, training=self.is_train)\n self.hidden_layers += 1\n\n L3 = tf.layers.conv2d(inputs= L2, filters= 64, kernel_size= [3,3], padding=\"SAME\", activation=tf.nn.relu, kernel_regularizer=self.regularizer)\n L3 = tf.layers.batch_normalization(L3, training=self.is_train)\n L3 = tf.layers.dropout(L3, rate=1-self.keep_prob, training=self.is_train)\n self.hidden_layers += 1\n\n L4 = tf.layers.conv2d(L3, filters= 64, kernel_size= [3,3],padding=\"SAME\", activation=tf.nn.relu, kernel_regularizer=self.regularizer)\n L4 = tf.layers.batch_normalization(L4, training=self.is_train)\n L4 = tf.layers.dropout(L4, rate=1-self.keep_prob, training=self.is_train)\n self.hidden_layers += 1\n\n L5 = tf.layers.conv2d(inputs= L4, filters= 128, kernel_size= [3,3], padding=\"SAME\", activation=tf.nn.relu, kernel_regularizer=self.regularizer)\n L5 = tf.layers.batch_normalization(L5, training=self.is_train)\n L5 = tf.layers.dropout(L5, rate=1-self.keep_prob, training=self.is_train)\n self.hidden_layers += 1\n\n Flat = tf.reshape(L5, [-1, 128*num_time_step*num_input])\n Dense1 = tf.layers.dense(inputs=Flat, units=self.hidden_neurons, activation=tf.nn.relu, kernel_regularizer=self.regularizer)\n Dense1 = tf.layers.batch_normalization(Dense1, training=self.is_train)\n self.hidden_layers += 1\n\n Dense2 = tf.layers.dense(inputs=Dense1, units=self.hidden_neurons, activation=tf.nn.relu, kernel_regularizer=self.regularizer)\n Dense2 = tf.layers.batch_normalization(Dense2, training=self.is_train)\n self.hidden_layers += 1\n \n self.logits = tf.layers.dense(inputs=Dense2, units=num_output)\n self.hypothesis = tf.nn.softmax(self.logits)\n self.hypothesis = tf.identity(self.hypothesis, \"hypothesis\")\n\n self.l2_reg = tf.losses.get_regularization_loss()\n self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.logits, labels=self.Y))\n self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(self.update_ops):\n self.optimizer = tf.train.AdamOptimizer(learning_rate= learning_rate).minimize(self.cost+self.l2_reg)\n \n self.prediction = tf.argmax(self.hypothesis, 1)\n self.correct_prediction = tf.equal(self.prediction, tf.argmax(self.Y, 1))\n self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))\n\n def get_mean_error_hypothesis(self, x_test, y_test, keep_prop=1.0, is_train=False):\n return self.sess.run([self.accuracy, self.hypothesis, self.X, self.Y, self.l2_reg, self.cost], feed_dict={self.X: x_test, self.Y: y_test, self.keep_prob: keep_prop, self.is_train: is_train})\n\n def train(self, x_data, y_data, keep_prop=1.0, is_train=True):\n return self.sess.run([self.accuracy, self.cost, self.l2_reg, self.optimizer], feed_dict={\n self.X: x_data, self.Y: y_data, self.keep_prob: keep_prop, self.is_train: is_train})\n\n def next_batch(self, num, data):\n x_batch = []\n y_batch = []\n i = 0\n for line in 
data:\n line = [float(i) for i in line]\n for j in range(num_time_step):\n x_batch.append(line[j*num_input:(j+1)*num_input])\n y_batch.append(line[-num_output:])\n i = i+1\n\n if i == num:\n break\n\n return [np.asarray(np.reshape(x_batch, (-1, num_time_step*num_input))), np.asarray(np.reshape(y_batch,(-1,num_output)))]\n\n def get_hidden_number(self):\n return [self.hidden_layers, self.hidden_neurons]\n\n# input/output number\nnum_input = 36\nnum_output = 2\nnum_time_step = 5\n\n# parameters\nlearning_rate = 0.000050 #0.000001\ntraining_epochs = 100\nbatch_size = 100\ntotal_batch = 4490 # joint: 4929, random:4490\ntotal_batch_val = 962 # joint: 1056, random: 962\ntotal_batch_test = 962 # joint: 1056, random: 962\ndrop_out = 0.75\nregul_factor = 0.001\n\n\n# initialize\nsess = tf.Session()\nm1 = Model(sess, \"m1\")\nsess.run(tf.global_variables_initializer())\n\n\nif wandb_use == True:\n wandb.config.epoch = training_epochs\n wandb.config.batch_size = batch_size\n wandb.config.learning_rate = learning_rate\n wandb.config.drop_out = drop_out\n wandb.config.num_input = num_input\n wandb.config.num_output = num_output\n wandb.config.total_batch = total_batch\n wandb.config.activation_function = \"ReLU\"\n wandb.config.training_episode = 1200\n wandb.config.hidden_layers, wandb.config.hidden_neurons = m1.get_hidden_number()\n wandb.config.L2_regularization = regul_factor \n\n# train my model\ntrain_mse = np.zeros(training_epochs)\nvalidation_mse = np.zeros(training_epochs)\n\ntrain_cost = np.zeros(training_epochs)\nvalidation_cost = np.zeros(training_epochs)\n\nfor epoch in range(training_epochs):\n accu_train = 0\n accu_val = 0\n cost_train = 0\n reg_train = 0\n reg_val = 0\n cost_val = 0\n f = open('../data/random/CNN/training_data_.csv', 'r', encoding='utf-8')\n rdr = csv.reader(f)\n\n for i in range(total_batch):\n batch_xs, batch_ys = m1.next_batch(batch_size, rdr)\n c, cost, reg, _ = m1.train(batch_xs, batch_ys, drop_out)\n accu_train += c / total_batch\n cost_train += cost / total_batch\n reg_train += reg / total_batch\n\n f_val = open('../data/random/CNN/validation_data_.csv', 'r', encoding='utf-8')\n rdr_val = csv.reader(f_val)\n for i in range(total_batch_val):\n batch_xs_val, batch_ys_val = m1.next_batch(batch_size, rdr_val)\n c, _, _, _ , reg, cost = m1.get_mean_error_hypothesis(batch_xs_val, batch_ys_val)\n accu_val += c / total_batch_val\n reg_val += reg / total_batch_val\n cost_val += cost / total_batch_val\n\n print('Epoch:', '%04d' % (epoch + 1))\n print('Train Accuracy =', '{:.9f}'.format(accu_train))\n print('Validation Accuracy =', '{:.9f}'.format(accu_val))\n print('Train Cost =', '{:.9f}'.format(cost_train), 'Train Regul =', '{:.9f}'.format(reg_train))\n print('Validation Cost =', '{:.9f}'.format(cost_val), 'Validation Regul =', '{:.9f}'.format(reg_val))\n\n train_mse[epoch] = accu_train\n validation_mse[epoch] = accu_val\n\n train_cost[epoch] = cost_train\n validation_cost[epoch] = cost_val\n\n if wandb_use == True:\n wandb.log({'training Accuracy': accu_train, 'validation Accuracy': accu_val})\n wandb.log({'training cost': cost_train, 'training reg': reg_train, 'validation cost': cost_val, 'validation reg': reg_val})\n\n # if epoch % 20 ==0:\n # for var in tf.trainable_variables():\n # name = var.name\n # wandb.log({name: sess.run(var)})\n\n\nprint('Learning Finished!')\n\nf_test = open('../data/random/CNN/testing_data_.csv', 'r', encoding='utf-8')\nrdr_test = csv.reader(f_test)\naccu_test = 0\nreg_test = 0\ncost_test = 0\n\nfor i in range(total_batch_test):\n 
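# Final test pass: is_train defaults to False, so batch norm and dropout run in inference mode.\n    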
batch_xs_test, batch_ys_test = m1.next_batch(batch_size, rdr_test)\n c, _, _, _, reg, cost = m1.get_mean_error_hypothesis(batch_xs_test, batch_ys_test)\n accu_test += c / total_batch_test\n reg_test += reg / total_batch_test\n cost_test += cost / total_batch_test\nprint('Test Accuracy: ', accu_test)\nprint('Test Cost: ', cost_test)\n\nelapsed_time = time.time() - start_time\nprint(elapsed_time)\n\nsaver = tf.train.Saver()\nsaver.save(sess,'model/model.ckpt')\n\nif wandb_use == True:\n saver.save(sess, os.path.join(wandb.run.dir, 'model/model.ckpt'))\n wandb.config.elapsed_time = elapsed_time\n\n#epoch = np.arange(training_epochs)\n#plt.plot(epoch, train_mse, 'r', label='train')\n#plt.plot(epoch, validation_mse, 'b', label='validation')\n#plt.legend()\n#plt.xlabel('epoch')\n#plt.ylabel('abs error')\n#plt.show()\n","repo_name":"kdh0429/learning_collision_detection_real","sub_path":"CNN/collision_detection_nn.py","file_name":"collision_detection_nn.py","file_ext":"py","file_size_in_byte":9503,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"4"} +{"seq_id":"11828307511","text":"\nimport sys\nimport argparse\nfrom RNN_depth_trainer_mtv_occ import *\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\n\ndef get_available_gpus():\n \"\"\"\n Returns a list of the identifiers of all visible GPUs.\n \"\"\"\n from tensorflow.python.client import device_lib\n local_device_protos = device_lib.list_local_devices()\n return [x.name for x in local_device_protos if x.device_type == 'GPU']\n\n\n\ndef assign_to_device(device, ps_device):\n \"\"\"Returns a function to place variables on the ps_device.\n\n Args:\n device: Device for everything but variables\n ps_device: Device to put the variables on. Example values are /GPU:0 and /CPU:0.\n\n If ps_device is not set then the variables will be placed on the default device.\n The best device for shared varibles depends on the platform as well as the\n model. Start with CPU:0 and then test GPU:0 to see if there is an\n improvement.\n \"\"\"\n def _assign(op):\n\n PS_OPS = [\n 'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',\n 'MutableHashTableOfTensors', 'MutableDenseHashTable'\n ]\n node_def = op if isinstance(op, tf.NodeDef) else op.node_def\n if node_def.op in PS_OPS:\n return ps_device\n else:\n return device\n return _assign\n\n\n# Source:\n# https://github.com/tensorflow/models/blob/master/tutorials/image/cifar10/cifar10_multi_gpu_train.py#L101\ndef average_gradients(tower_grads):\n \"\"\"Calculate the average gradient for each shared variable across all towers.\n Note that this function provides a synchronization point across all towers.\n Args:\n tower_grads: List of lists of (gradient, variable) tuples. The outer list ranges\n over the devices. The inner list ranges over the different variables.\n Returns:\n List of pairs of (gradient, variable) where the gradient has been averaged\n across all towers.\n \"\"\"\n average_grads = []\n for grad_and_vars in zip(*tower_grads):\n\n # Note that each grad_and_vars looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\n grads = [g for g, _ in grad_and_vars]\n grad = tf.reduce_mean(grads, 0)\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. 
we will just return the first tower's pointer to\n # the Variable.\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n return average_grads\n\n\ndef main():\n\n # Add flags\n parser = argparse.ArgumentParser(description=\"Train RNN depth\")\n parser.add_argument('--dataset', default=\"\", choices=['midair', 'tartanair', 'kitti'], help=\"\"\"Dataset to use (midair, tartanair or kitti)\"\"\")\n parser.add_argument(\"--dataset_dir\", type=str, default=\"\", help=\"The path to the data directory\")\n parser.add_argument(\"--records_dir\", type=str, default=\"\", help=\"The path to the records directory\")\n parser.add_argument(\"--checkpoint_dir\", type=str, default=\"/playpen1/rui/RNN_depth/models/\", help=\"The path to the checkpoint directory\")\n parser.add_argument(\"--h\", type=int, default=384, help=\"Desired output height\")\n parser.add_argument(\"--w\", type=int, default=384, help=\"Desired output width\")\n parser.add_argument(\"--continue_train\", type=bool, default=False, help=\"Continue train\")\n parser.add_argument(\"--restore_path\", type=str, default=\"\", help=\"The path to load checkpoint\")\n parser.add_argument(\"--eval_set_dir\", type=str, default=None, help=\"The path to the evaluation directory\")\n parser.add_argument(\"--num_epochs\", type=int, default=20, help=\"The number of training epochs\")\n parser.add_argument(\"--summary_freq\", type=int, default=100, help=\"The frequence to summarize and save model\")\n parser.add_argument(\"--eval_freq\", type=int, default=1000, help=\"The frequence to evaluate model\")\n parser.add_argument(\"--save_latest_freq\", type=int, default=5000, help=\"The frequence to save model\")\n parser.add_argument('--export_pics', dest=\"export_pics\", action=\"store_true\", help=\"Export maps in files\")\n parser.set_defaults(export_pics=False)\n\n\n args = None\n try:\n args = parser.parse_args()\n print(args)\n except:\n return 1\n\n # Initialize trainer object\n m_trainer = RNN_depth_trainer()\n\n\n # A boolean evaluate every # steps\n\n\n eval_dataLoader = m_trainer.initDataloader(args.dataset, args.eval_set_dir, args.records_dir, batch_size=1, num_epochs=1,is_training=False, img_height=args.h, img_width=args.w)\n\n\n # Multiple GPU\n devices = get_available_gpus()\n\n # Multiple grad and loss\n tower_grads = []\n tower_loss = []\n\n # Optimization method\n learning_rate = 0.0001\n beta = 0.9\n global_step = tf.train.get_or_create_global_step()\n optim = tf.train.AdamOptimizer(learning_rate, beta)\n\n controller=\"/cpu:0\"\n with tf.variable_scope(tf.get_variable_scope()) as outer_scope:\n for i, id in enumerate([devices[0]]):\n name = 'tower_{}'.format(i)\n\n # Loop through all available GPU\n with tf.device(assign_to_device(id, controller)), tf.name_scope(name):\n\n # Load a sample\n eval_data_dict = m_trainer.load_data(eval_dataLoader)\n\n # Forward network\n estimates = m_trainer.construct_model(eval_data_dict) #\n\n # Compute Loss\n\n perfs, names, out_maps = m_trainer.compute_perfs(estimates, eval_data_dict)# est_depths_bw, est_poses_bw,\n\n outer_scope.reuse_variables()\n\n # Run\n m_trainer.eval(perfs, names, out_maps, args, eval_data_dict)\n\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"michael-fonder/M4Depth-Baselines","sub_path":"RNN_depth_pose/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":5741,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} +{"seq_id":"70280841396","text":"import os\n\nfrom 
auto_parallel.semi_auto_parallel_simple_net import (\n DemoNet,\n TestSimpleNetForSemiAutoParallel,\n)\n\nimport paddle\nimport paddle.distributed as dist\n\n\nclass TestSimpleNetHybridStrategyForSemiAutoParallel(\n TestSimpleNetForSemiAutoParallel\n):\n def __init__(self):\n self._dtype = os.getenv(\"dtype\")\n self._backend = os.getenv(\"backend\")\n self._seed = eval(os.getenv(\"seed\"))\n self._mesh = dist.ProcessMesh([[0, 1], [2, 3]], dim_names=[\"x\", \"y\"])\n\n paddle.set_device(self._backend)\n\n self.set_random_seed(self._seed)\n self.init_single_card_net_result()\n\n def test_dp_mp_demo_net(self):\n self.set_random_seed(self._seed)\n model = dist.shard_layer(\n DemoNet(\"dp_mp_hybrid_strategy\"), self._mesh, self.shard_fn\n )\n\n (\n self.dp_mp_loss,\n self.dp_mp_parameters,\n ) = self.run_dynamic(model, shard_input=True)\n\n self.check_tensor_eq(self.dp_mp_loss, self.base_loss)\n for param, param_base in zip(\n self.dp_mp_parameters, self.base_parameters\n ):\n self.check_tensor_eq(param, param_base)\n self.check_tensor_eq(param.grad, param_base.grad)\n\n def run_test_case(self):\n self.test_dp_mp_demo_net()\n\n\nif __name__ == '__main__':\n TestSimpleNetHybridStrategyForSemiAutoParallel().run_test_case()\n","repo_name":"sueszli/vector-database-benchmark","sub_path":"dataset/python/semi_auto_parallel_simple_net_dp_mp.py","file_name":"semi_auto_parallel_simple_net_dp_mp.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"1948074793","text":"from setuptools import setup\nimport os\n\npackages = []\nroot_dir = os.path.dirname(__file__)\nif root_dir:\n os.chdir(root_dir)\n\nfor dirpath, dirnames, filenames in os.walk('bw2data'):\n # Ignore dirnames that start with '.'\n if '__init__.py' in filenames:\n pkg = dirpath.replace(os.path.sep, '.')\n if os.path.altsep:\n pkg = pkg.replace(os.path.altsep, '.')\n packages.append(pkg)\n\nv_temp = {}\nwith open(\"bw2data/version.py\") as fp:\n exec(fp.read(), v_temp)\nversion = \".\".join((str(x) for x in v_temp[\"version\"]))\n\nsetup(\n name='bw2data',\n version=version,\n packages=packages,\n python_requires='>=3.5',\n author=\"Chris Mutel\",\n author_email=\"cmutel@gmail.com\",\n license=\"3-clause BSD\",\n install_requires=[\n \"appdirs\",\n \"bw2parameters\",\n \"bw_processing\",\n \"docopt\",\n \"fasteners\",\n \"lxml\",\n \"numpy\",\n \"peewee>=3.9.4\",\n \"psutil\",\n \"pyprind\",\n \"requests>=1.1.0\",\n \"scipy\",\n \"stats_arrays\",\n \"voluptuous\",\n \"whoosh\",\n \"wrapt\",\n ],\n url=\"https://github.com/brightway-lca/brightway2-data\",\n long_description=open('README.rst').read(),\n description=('Tools for the management of inventory databases '\n 'and impact assessment methods. 
Part of the Brightway2 LCA Framework'),\n entry_points = {\n 'console_scripts': [\n 'bw2-uptodate = bw2data.bin.bw2_uptodate:main',\n ]\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Scientific/Engineering :: Visualization',\n ],)\n","repo_name":"tsgfan07/brightway2-data","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"4"} +{"seq_id":"8442321210","text":"import pandas as pd\r\nimport numpy as np\r\n'''\r\ndata_15 = pd.read_csv('data/123.csv', encoding='utf-8')\r\nprint(data_15)\r\n#a = (data_15.groupby(['year', 'team']).sum().loc[lambda df: df.r > 100])\r\n#print(a)\r\ndf = data_15['a'].groupby(data_15['f']).mean()\r\nprint(df)\r\nfor name, group in data_15.groupby(data_15['w']):\r\n print(name)\r\n print(group)\r\nstates=np.array([1,1,2,2,2])\r\nyears=np.array([1,2,3,4,4])\r\na = data_15['a'].groupby([states,years]).mean()\r\nprint(a)\r\n'''\r\ndata_15 = pd.read_csv('data/0927-1.csv', encoding='utf_8_sig')\r\ndef Time_chuli(input_data):\r\n time_list = input_data['开始时间']\r\n year = list()\r\n month = list()\r\n day = list()\r\n hour =list()\r\n minute = list()\r\n for net in time_list:\r\n #2018-09-26 22:45:00\r\n year.append(net[:4])\r\n month.append(net[5:7])\r\n day.append(net[8:10])\r\n hour.append(net[11:13])\r\n #minute.append(net[14:16])\r\n time_df = pd.DataFrame({'year': year, 'month': month, 'day':day, 'hour':hour})\r\n result = pd.concat([input_data, time_df], axis=1, sort=False)[['year', 'month', \\\r\n 'day', 'hour', '小区名称', 'MR-RRC连接建立最大用户数','小区载频PUSCH可用的PRB个数']]\r\n\r\n return result\r\n\r\n#取3个字段并处理时间\r\ndata_15_1 = Time_chuli(data_15[['开始时间', '小区名称', 'MR-RRC连接建立最大用户数','小区载频PUSCH可用的PRB个数']])\r\n#print(data_15_1)\r\n#df = data_15['a'].groupby(data_15['f']).mean()\r\n#print(df)\r\n#print(data_15_1)\r\nhours=np.array(data_15_1['hour'])\r\ncells=np.array(data_15_1['小区名称'])\r\nprint(cells)\r\na = data_15['MR-RRC连接建立最大用户数'].groupby([hours,cells]).mean()\r\nb = data_15[['MR-RRC连接建立最大用户数','小区载频PUSCH可用的PRB个数']].groupby([cells,hours]).mean()\r\n#print(b)\r\n\r\n#b.to_csv('111.csv', encoding='utf_8_sig')","repo_name":"Jizishuo/tensorflow-work","sub_path":"0927-merge-2.py","file_name":"0927-merge-2.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"11208114562","text":"import os\nimport streamlit.components.v1 as components\n\n# Create a _RELEASE constant. We'll set this to False while we're developing\n# the component, and True when we're ready to package and distribute it.\n# (This is, of course, optional - there are innumerable ways to manage your\n# release process.)\n_RELEASE = True\n\n# Declare a Streamlit component. 
`declare_component` returns a function\n# that is used to create instances of the component. We're naming this\n# function \"_component_func\", with an underscore prefix, because we don't want\n# to expose it directly to users. Instead, we will create a custom wrapper\n# function, below, that will serve as our component's public API.\n\n# It's worth noting that this call to `declare_component` is the\n# *only thing* you need to do to create the binding between Streamlit and\n# your component frontend. Everything else we do in this file is simply a\n# best practice.\n\nif not _RELEASE:\n _component_func = components.declare_component(\n # We give the component a simple, descriptive name (\"my_component\"\n # does not fit this bill, so please choose something better for your\n # own component :)\n \"st_org_chart\",\n # Pass `url` here to tell Streamlit that the component will be served\n # by the local dev server that you run via `npm run start`.\n # (This is useful while your component is in development.)\n url=\"http://localhost:3001\",\n )\nelse:\n # When we're distributing a production version of the component, we'll\n # replace the `url` param with `path`, and point it to to the component's\n # build directory:\n parent_dir = os.path.dirname(os.path.abspath(__file__))\n build_dir = os.path.join(parent_dir, \"frontend/build\")\n _component_func = components.declare_component(\"st_org_chart\", path=build_dir)\n\n\n# Create a wrapper function for the component. This is an optional\n# best practice - we could simply expose the component function returned by\n# `declare_component` and call it done. The wrapper allows us to customize\n# our component's API: we can pre-process its input args, post-process its\n# output value, and add a docstring for users.\ndef st_org_chart(\n chart_data=[],\n key=None):\n\n response = _component_func(chart_data=chart_data, key=key)\n\n return None\n\n\n# Add some test code to play with the component while it's in development.\n# During development, we can run this just as we would any other Streamlit\n# app: `$ streamlit run my_component/__init__.py`\n","repo_name":"kmcgrady/streamlit-org-chart","sub_path":"streamlit_org_chart/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} +{"seq_id":"4132755604","text":"def rotate_matrix_clockwise(matrix: list):\n matrix.reverse()\n for i in range(len(matrix)):\n for j in range(i):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n return matrix\n\n\ndef rotate_matrix_counterclockwise(matrix: list):\n matrix.reverse()\n for i in range(len(matrix)):\n for j in range(len(matrix) - i):\n i_ = len(matrix) - 1 - i\n j_ = len(matrix) - 1 - j\n matrix[i][j], matrix[j_][i_] = matrix[j_][i_], matrix[i][j]\n return matrix\n\n\ndef print_matrix(matrix: list):\n for row in matrix:\n for val in row:\n print(val, end='\\t')\n print()\n print()\n\n\nm = [[1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 0, 1, 2],\n [3, 4, 5, 6]]\n\nprint_matrix(m)\nprint_matrix(rotate_matrix_clockwise(m))\nprint_matrix(rotate_matrix_counterclockwise(m))\n","repo_name":"nozokada/cracking-the-coding-interview","sub_path":"random_notes/rotate_matrix.py","file_name":"rotate_matrix.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"73705970997","text":"from flask import render_template\nfrom flask_login import login_required\nfrom 
app.deployment import bp\nfrom app.models import Cluster, Deployment, Deployment_log\nfrom flask_login import current_user\nfrom flask_table import Table, Col\nimport json\nfrom sqlalchemy import desc\nfrom flask import session\n\n\n@bp.route('/deployment_log/', methods=['GET','POST'])\n#@login_required\ndef deployment_log(deployment_id):\n userID_exist = session['userID_exist']\n cluster_list = []\n if (deployment_id == 0):\n cluster_list = Cluster.query.filter_by(user_id=userID_exist).filter_by(user_id=userID_exist).all()\n return render_template('deployment/deployment_logs.html', title='Deployment Logs', deployment_id=deployment_id,cluster_list=cluster_list)\n\n\n@bp.route('/deployment_log_table/', methods=['GET','POST'])\n#@login_required\ndef deployment_log_table(deployment_id):\n row_headers = [\"id\",\"deployment id\",\"task\",\"timestamp\"]\n deployment_list = Deployment_log.query.filter_by(deployment_id=deployment_id)\n json_data=[]\n for result in deployment_list:\n json_data.append(row2dict(result))\n return json.dumps(json_data)\n\n@bp.route('/deployment_log', methods=['GET','POST'])\n#@login_required\ndef deployment_log1(deployment_id):\n class ItemTable(Table):\n task = Col('Task Name')\n timestamp = Col('Created At')\n dep_all_tasks = Deployment_log.query.filter_by(deployment_id=deployment_id).all()\n table = ItemTable(dep_all_tasks)\n table.border = True\n return render_template('deployment/deployment_logs.html', title='Deployment Logs', deployment_id=deployment_id)\n\ndef row2dict(row):\n d = {}\n for column in row.__table__.columns:\n d[column.name] = str(getattr(row, column.name))\n\n return d\n\n@bp.route('/deployment_list_cluster_id/', methods=['GET','POST'])\n#@login_required\ndef deployment_list_cluster_id(cluster_id):\n row_headers = [\"id\",\"name\",\"cluster_id\",\"timestamp\"]\n deployment_list = Deployment.query.filter_by(cluster_id=cluster_id).order_by(desc(Deployment.id))\n json_data=[]\n for result in deployment_list:\n json_data.append(row2dict(result))\n return json.dumps(json_data)\n\n@bp.route('/deployment_log_spec_id/', methods=['GET','POST'])\n#@login_required\ndef deployment_log_spec_id(deployment_id):\n row_headers = [\"id\",\"deployment id\",\"task\",\"timestamp\"]\n deployment_list = Deployment_log.query.filter_by(deployment_id=deployment_id).order_by(desc(Deployment_log.id))\n json_data=[]\n for result in deployment_list:\n json_data.append(row2dict(result))\n return json.dumps(json_data)\n","repo_name":"Fiware-Community/kubernetes-in-FiwareLab","sub_path":"UI/K8s/K8s_ui/microblog2/app/deployment/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} +{"seq_id":"41385016009","text":"# Using the below data and writing Python code produce the following results\r\n # 1. Resultant file with name and total scores for each student.\r\n # 2. 
Another file with top scorer’s name, subject and score in each subject.\r\n\r\n# Student_name,subj1,score1,subj2,score2\r\n# abc,math,89,science,91\r\n# xyz,math,90,science,95\r\n# cde,math,86,science,78\r\n\r\n# Importing necessary packages into Python.\r\n\r\nimport pandas as pd #Loading Panda build data and to peform any data analysis opertations\r\n\r\n# Create the list\r\ndata = {'Student_name': ['abc', 'xyz', 'cde'], 'subj1': ['math', 'math', 'math'], 'score1': [89, 90,86], 'subj2': ['science', 'science', 'science'],'score2': [91, 95,78]}\r\n\r\n# Converting list into a data frame\r\ndf = pd.DataFrame(data) \r\n\r\n# Converting the dataframe from a wide format to a long format for easy analysis of data.\r\n# Creating two datasets by pulling Sub1 and Sub2 separately\r\ndf1 = df[['Student_name','subj1','score1']]\r\ndf2 = df[['Student_name','subj2','score2']]\r\n\r\n# Renaming the columns to standard names\r\ndf1.columns =['Student_name', 'Subject_Name', 'Score']\r\ndf2.columns =['Student_name', 'Subject_Name', 'Score']\r\n\r\n# Appending the two datasets into one dataset\r\nFinal_df = df1.append(df2, sort=False)\r\n\r\n# Indexing the column names\r\n#Final_df = Final_df.reindex(columns=['Student_name', 'Subject_Name', 'Score'])\r\n\r\n# Calculating the total scores for each student and adding the result back to the data frame\r\nFirst_result = Final_df.groupby(['Student_name']).agg({'Score': \"sum\"})\r\n\r\n# Exporting the First results to a csv file\r\nFirst_result.to_csv('first_result.csv', sep=',')\r\n\r\n# Rank ordering the scores by subject and by student to get students with highest score in each subject\r\nFinal_df['Rank'] = Final_df.groupby(['Subject_Name'])['Score'].rank(method='max', ascending = False)\r\n\r\n# Filtering on students that scored highest in both Maths and Science\r\nSecond_result = Final_df.loc[Final_df.Rank == 1, [\"Student_name\", \"Subject_Name\", \"Score\"]]\r\n\r\n# Exporting the First results to a csv file\r\nSecond_result.to_csv('second_result.csv', sep=',',index=False)\r\n","repo_name":"kantipudi6969/Python_Test","sub_path":"Python script/Python_Project.py","file_name":"Python_Project.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"38841830828","text":"from django.urls import path\n\nfrom .mixinViews import (\n TodoListMixin,\n TodoCreateMixin,\n TodoRetriveMixin,\n TodoPutMixin,\n TodoDeleteMixin,\n home,\n)\n\n\nurlpatterns = [\n path(\"list/\", TodoListMixin.as_view(), name=\"get\"),\n path(\"create/\", TodoCreateMixin.as_view(), name=\"post\"),\n path(\"retrive/\", TodoRetriveMixin.as_view(), name=\"retrieve\"),\n path(\"delete/\", TodoDeleteMixin.as_view(), name=\"destroy\"),\n path(\"update/\", TodoPutMixin.as_view(), name=\"put\"),\n]","repo_name":"smooth-55/DRF-differentViews","sub_path":"generic_views/mixins/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"30026695599","text":"import pytest\nimport json\nfrom shlscraper.spiders.khl import KhlSpider\n\n\n@pytest.fixture()\ndef spider():\n return KhlSpider()\n\n\nwith open('json/khlLeague.json') as f:\n data = json.load(f)\n# print(data)\n\n\n@pytest.mark.parametrize(\n [\"url\", \"expected\"],\n [\n (\n \"https://en.khl.ru/stat/teams/\",\n data\n )\n ],\n)\ndef test_parse(spider, response, expected):\n result = next(spider.parse(response))\n assert result == 
expected, \"Data json dump is wrong KeyValue Error\"\n\n\nprint('=======================================')\nprint('\\ttest case is passed')\nprint('=======================================')\n","repo_name":"vsinghal3737/Ice-Hockey-Leagues-WebScraping","sub_path":"shlscraper/test_khl.py","file_name":"test_khl.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} +{"seq_id":"17857574072","text":"'''\nExtract maximally and minamlly face-response voxels from each ROI\n\nUses a parametric face cov to find voxel with highest correlation to the time series\n'''\n\ncurr_dir = '/user_data/vayzenbe/GitHub_Repos/ginn'\n\nimport sys\n\nsys.path.insert(1, f'{curr_dir}')\n\nimport warnings\nimport os\n\n\nwarnings.filterwarnings(\"ignore\")\n\nimport pandas as pd\nimport numpy as np\nfrom nilearn import signal\nimport pdb\n\nimport ginn_params as params\nimport analysis_funcs as af\n\n# threshold for PCA\nglobal_signal = 'mean'\nuse_pc_thresh = True\n\n\n\nexp = 'aeronaut'\nstudy_dir,subj_dir, sub_list, vid, file_suf, fix_tr, data_dir, vols, tr, fps, bin_size, ages = params.load_params(exp)\n\nout_dir = f'{subj_dir}/group_func'\n#results_dir = f'{curr_dir}/results/isc'\n\nroi_dir = f'{study_dir}/derivatives/rois'\n\nrois = ['LOC','FFA','A1','EVC'] + ['lLOC','lFFA','lA1','lEVC'] + ['rLOC','rFFA','rA1','rEVC']\n#rois = ['rFFA']\n\nface_cov = np.load(f'{curr_dir}/fmri/pre_proc/{exp}_parametric_face_cov.npy')\n\nfor sub in sub_list['participant_id']:\n sub_dir = f'{subj_dir}/sub-{sub}/timeseries'\n\n\n for roi in rois:\n print(f'{sub} {roi}')\n #load roi data\n roi_data = np.load(f'{sub_dir}/{roi}_ts_all.npy')\n \n roi_data = roi_data[fix_tr:,:]\n\n #remove voxels cols with 0s\n roi_data = roi_data[:,~np.all(roi_data == 0, axis=0)]\n\n #correlate each voxel to face cov\n roi_corr = np.zeros(roi_data.shape[1])\n for v in range(roi_data.shape[1]):\n roi_corr[v] = np.corrcoef(roi_data[:,v],face_cov)[0,1]\n\n #get top 10% of voxels\n top_10 = np.argsort(roi_corr,axis=0)[-int(roi_data.shape[1]*0.1):]\n #extract top 10% of voxels\n top_10_roi_data = roi_data[:,top_10]\n\n\n #append 3 dummy values to front of time series to match other roi data\n top_10_roi_data = np.concatenate((np.zeros((3,top_10_roi_data.shape[1])),top_10_roi_data),axis=0)\n \n #save mean of top 10% of voxels\n np.save(f'{sub_dir}/{roi}_face.npy',top_10_roi_data)\n\n #get bottom 10% of voxels\n bottom_10 = np.argsort(roi_corr,axis=0)[:int(roi_data.shape[1]*0.1)]\n #extract bottom 10% of voxels\n bottom_10_roi_data = roi_data[:,bottom_10]\n \n #save mean of bottom 10% of voxels\n np.save(f'{sub_dir}/{roi}_nonface.npy',bottom_10_roi_data)\n \n #save roi_correlations\n np.save(f'{sub_dir}/{roi}_roi_corr.npy',roi_corr)\n \n\n\n\n\n\n\n\n\n","repo_name":"vayzenb/ginn","sub_path":"fmri/extract_face_vox.py","file_name":"extract_face_vox.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"27572051681","text":"# -*- coding: utf-8 -*-\nfrom components.freeze.base import FrozenOnly\nfrom objects.CSCG._2d.fields.vector.do.reconstruct.main import _2dCSCG_Vector_Do_Reconstruct\nfrom objects.CSCG._2d.fields.vector.do.inner_product.main import _2CSCG_VectorField_InnerProduct\n\nfrom components.quadrature import Quadrature\nfrom root.config.main import RANK, MASTER_RANK, COMM, np\n\n\nclass _2dCSCG_VectorField_DO(FrozenOnly):\n def __init__(self, vf):\n self._vf_ = vf\n 
self._reconstruct_ = _2dCSCG_Vector_Do_Reconstruct(vf)\n self._inner_product_ = _2CSCG_VectorField_InnerProduct(vf)\n self._freeze_self_()\n\n def evaluate_func_at_time(self, time=None):\n return self._vf_.___DO_evaluate_func_at_time___(time=time)\n\n @property\n def reconstruct(self):\n return self._reconstruct_\n\n @property\n def inner_product(self):\n return self._inner_product_\n\n def compute_Ln_norm(self, n=2, quad_degree=None):\n \"\"\"We compute Ln norm of self.\n\n int(self)**(n) over the mesh.\n \"\"\"\n if quad_degree is None:\n quad_degree = (7, 7)\n\n vf = self._vf_\n\n if vf.ftype == 'standard':\n qnx, qny, quad_weights = Quadrature(quad_degree).quad_ndim\n\n QuadValue = vf.do.reconstruct(qnx, qny)[1]\n detJ = vf.mesh.elements.coordinate_transformation.Jacobian(qnx, qny)\n\n local_norm = list()\n for i in QuadValue:\n\n local_norm.append(\n np.sum(\n ((QuadValue[i][0])**n+(QuadValue[i][1])**n+(QuadValue[i][2])**n)\n * quad_weights\n * detJ[i]\n )\n )\n\n local_norm = np.sum(local_norm)\n\n local_norm = COMM.gather(local_norm, root=MASTER_RANK)\n if RANK == MASTER_RANK:\n local_norm = np.sum(local_norm) ** (1/n)\n else:\n pass\n\n local_norm = COMM.bcast(local_norm, root=MASTER_RANK)\n\n return local_norm\n\n else:\n raise NotImplementedError(f\"L^{n}-norm of 2dCSCG scalar is not implemented.\")\n","repo_name":"mathischeap/mifem","sub_path":"objects/CSCG/_2d/fields/vector/do/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} +{"seq_id":"1847396006","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 16 21:54:20 2020\n\n@author: nenad\n\"\"\"\n\nclass Node:\n def __init__(self, info): \n self.info = info \n self.left = None \n self.right = None \n self.level = None \n\n def __str__(self):\n return str(self.info) \n\nclass BinarySearchTree:\n def __init__(self): \n self.root = None\n\n def create(self, val): \n if self.root == None:\n self.root = Node(val)\n else:\n current = self.root\n \n while True:\n if val < current.info:\n if current.left:\n current = current.left\n else:\n current.left = Node(val)\n break\n elif val > current.info:\n if current.right:\n current = current.right\n else:\n current.right = Node(val)\n break\n else:\n break\n \nvals = [3,2,5,1,4,6,7]\nbst = BinarySearchTree()\nfor val in vals:\n bst.create(val) \n\n# count longest sequence of nodes and then just decrement 1 (no of edges for that sequence) \ndef height_of_bst(root):\n if root is None:\n # annulate \n return -1\n return 1 + max(height_of_bst(root.left), height_of_bst(root.right))\n\nprint(height_of_bst(bst.root))\n\n# calculate height of root node\ndef height_of_bst(root):\n if root is None or (root.left is None and root.right is None):\n return 0\n return 1 + max(height_of_bst(root.left), height_of_bst(root.right))\n\nprint(height_of_bst(bst.root))\n ","repo_name":"NenadPantelic/HackerRank-Problem-Solving","sub_path":"Problem Solving - Interview preparation/7.Trees/TreeHeightOfBinaryTree.py","file_name":"TreeHeightOfBinaryTree.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"11860762302","text":"import numpy as np\nimport pandas as pd\n\n\n# Import data\ndata = pd.read_csv('/home/rootkit/out.csv')\n\n# Drop date variable\ndata = data.drop(['date'], 1)\ndata = data.drop(['date1'], 1)\ndata = data.drop(['date2'], 1)\n\n# Dimensions of 
dataset\nn = data.shape[0]\np = data.shape[1]\n\n# Make data a np.array\ndata = data.values\n\n# Training and test data\ntrain_start = 0\ntrain_end = int(np.floor(0.8*n))\ntest_start = train_end + 1\ntest_end = n\ndata_train = data[np.arange(train_start, train_end), :]\ndata_test = data[np.arange(test_start, test_end), :]","repo_name":"nakacodejuice/tensorflow_mytempout","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"352431350","text":"from cfman.__classpath__ import _Classpath\nfrom cfman.core.cf_manager import CFManager\nfrom clitt.core.tui.tui_application import TUIApplication\nfrom hspylib.core.enums.charset import Charset\nfrom hspylib.core.zoned_datetime import now\nfrom hspylib.modules.application.argparse.parser_action import ParserAction\nfrom hspylib.modules.application.exit_status import ExitStatus\nfrom hspylib.modules.application.version import Version\nfrom textwrap import dedent\n\nimport logging as log\nimport os\nimport sys\n\n\nclass Main(TUIApplication):\n \"\"\"Cloud Foundry Manager - Manage PCF applications.\"\"\"\n\n # The welcome message\n DESCRIPTION = _Classpath.get_source_path(\"welcome.txt\").read_text(encoding=Charset.UTF_8.val)\n\n # location of the .version file\n VERSION_DIR = _Classpath.source_path()\n\n def __init__(self, app_name: str):\n version = Version.load(load_dir=self.VERSION_DIR)\n super().__init__(app_name, version, self.DESCRIPTION.format(version))\n self._cf_manager = None\n\n def _setup_arguments(self) -> None:\n \"\"\"Initialize application parameters and options.\"\"\"\n # fmt: off\n self._with_options() \\\n .option(\"api\", \"a\", \"api\", \"the API endpoint to connect to (e.g. https://api.example.com)\", nargs=\"?\")\\\n .option(\"org\", \"o\", \"org\", \"the organization to connect to (Target organization)\", nargs=\"?\")\\\n .option(\"space\", \"s\", \"space\", \"the space to connect to (Target organization space)\", nargs=\"?\")\\\n .option(\"username\", \"u\", \"username\", \"the PCF username\", nargs=\"?\")\\\n .option(\"password\", \"p\", \"password\", \"the PCF password\", nargs=\"?\")\\\n .option(\n \"no-cache\", \"r\", \"no-cache\", \"avoiding using cached apps\",\n nargs=\"?\", action=ParserAction.STORE_TRUE)\\\n .option(\n \"endpoints\", \"f\", \"endpoints\",\n \"the file containing the CF API endpoint entries. 
\"\n \"If not provided, '$HOME/cf_endpoints.txt' will be used instead.\",\n nargs=1\n )\n # fmt: on\n\n def _main(self, *params, **kwargs) -> ExitStatus:\n \"\"\"Run the application with the command line arguments.\"\"\"\n self._cf_manager = CFManager(\n self.get_arg(\"api\"),\n self.get_arg(\"org\"),\n self.get_arg(\"space\"),\n self.get_arg(\"username\"),\n self.get_arg(\"password\"),\n self.get_arg(\"no-cache\"),\n self.get_arg(\"endpoints\") or f\"{os.getenv('HOME', os.getcwd())}/.cfman_endpoints.txt\",\n )\n log.info(\n dedent(\n f\"\"\"\n {self._app_name} v{self._app_version}\n\n Settings ==============================\n STARTED: {now(\"%Y-%m-%d %H:%M:%S\")}\n \"\"\"\n )\n )\n return self._exec_application()\n\n def _exec_application(self) -> ExitStatus:\n \"\"\"Execute the application.\"\"\"\n self._cf_manager.run()\n return ExitStatus.SUCCESS\n\n\nif __name__ == \"__main__\":\n # Application entry point\n Main(\"cfman\").INSTANCE.run(sys.argv[1:])\n","repo_name":"yorevs/hspylib","sub_path":"modules/cfman/src/main/cfman/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"4"} +{"seq_id":"71112118564","text":"\n# To find out Consonants\n\n\ndef Consonants(C):\n if C not in 'aeiou':\n return True\n\nst1 = input('Enter one string:')\n\n# Con = tuple(filter(Consonants,st1))\n# print(Con)\n# Enter one string:zero is better than copy\n# ('z', 'r', ' ', 's', ' ', 'b', 't', 't', 'r', ' ', 't', 'h', 'n', ' ', 'c', 'p', 'y')\n\n\n# using lambda :\n\nprint()\nprint(tuple(filter(lambda Con : Con not in 'bcdfghjklmnpqrstvwxyz',st1)))\n# ('a', ' ', 'e', ' ', 'a', 'e', ' ', 'a', 'e', ' ', 'u')\n\n\n# print(list(filter(lambda x: x not in 'aeiou','abcdefghijklmnopqrstuvwxyz')))","repo_name":"Mani015/Python_12pm","sub_path":"Python_Notes/Day-26-Inbuilt_Func/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"27719254328","text":"import numpy as np\n\ndef qr_Householder(A):\n n = np.ma.size(A, 0)\n Q = np.identity(n,dtype=None)\n original_A = A\n\n for k in range(1,n):\n P = k_householder(A,k)\n A = P.dot(A)\n Q = Q.dot(P)\n\n R = Q.transpose().dot(original_A)\n\n return Q,R\n\ndef k_householder(M,k):\n n = np.ma.size(M,0)\n P = np.identity(n,dtype=None)\n subM = M[k-1:,k-1:]\n subI = np.identity(np.ma.size(subM,0),dtype=None)\n\n a = subM[:,0]\n u = calculate_U(a)\n v = u/np.linalg.norm(u, ord=None, axis=None)\n subP = subI - 2*v.dot(v.transpose())\n P[k-1:,k-1:] = subP\n return P\n\ndef calculate_U(a):\n alfa = np.linalg.norm(a, ord=None, axis=None) * (-1.0) * np.sign(a[0,0])\n e = np.zeros((len(a),1))\n e[0] = 1\n return a - alfa*e\n","repo_name":"natinavas/Facial-Recognition","sub_path":"methods/Householder.py","file_name":"Householder.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43552734119","text":"import time,subprocess,os\n\ndef screen_record(cmd_list):\n b_res=subprocess.getoutput(cmd_list[0])\n lines=b_res.split('\\n')\n try:\n for line in lines:\n if not line.endswith('device'):\n raise Exception ('device not found')\n except:\n s_cmd=cmd_list[1]+s_path\n s_record=subprocess.getoutput(s_cmd)\n time.sleep(30)\n p_cmd=cmd_list[2]+s_path+d_path\n s_pull=subprocess.getoutput(p_cmd)\n mp4_feach=os.walk('D:\\\\')\n for root,dirs,files in 
mp4_feach:\n for file in files:\n try:\n if file==d_file:\n return True\n break\n except:\n raise Exception('file not found')\n \nif __name__=='__main__':\n cmd_list=['adb devices','adb shell screenrecord ','adb pull ']\n s_path='//sdcard//s_record.mp4 '\n d_path='D:\\\\adb_fastboot\\\\screen_record.mp4'\n d_file='screen_record.mp4'\n print(screen_record(cmd_list))\n","repo_name":"mupputur/Mobile","sub_path":"Atchyuth/screen_record.py","file_name":"screen_record.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31571581811","text":"from django.urls import path\n\nimport users.views as user_views\n\nurlpatterns = [\n path(\"login\", user_views.CustomLoginView.as_view(), name=\"login\"),\n path(\"logout\", user_views.CustomLogoutView.as_view(), name=\"logout\"),\n path(\"register\", user_views.CustomRegisterView.as_view(), name=\"register\"),\n path(\"profile\", user_views.CustomUserEditView.as_view(), name=\"edit\"),\n path(\"profile/delete\", user_views.CustomUserDeleteView.as_view(), name=\"delete\"),\n]\n","repo_name":"mprostakk/BD2","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19832416337","text":"import os\n\nimport aiohttp\nfrom aiohttp import ClientResponseError, ServerTimeoutError, TooManyRedirects\nfrom assistant import Config, bot, cus_filters\nfrom pyrogram import filters\nfrom pyrogram.types import Message\n\nNEKOBIN_URL = \"https://nekobin.com/\"\n\n@bot.on_message(filters.command(\"paste\") & cus_filters.auth_chats)\nasync def nekobin_paste(_, message: Message):\n \"\"\" Pastes the text directly to Nekobin \"\"\"\n cmd = len(message.text)\n msg = await message.reply(\"`Processing...`\")\n text = None\n if message.text and cmd > 6:\n _, args = message.text.split(maxsplit=1)\n text = args\n replied = message.reply_to_message\n file_ext = '.txt'\n if not cmd > 6 and replied and replied.document and replied.document.file_size < 2 ** 20 * 10:\n file_ext = os.path.splitext(replied.document.file_name)[1]\n path = await replied.download(\"downloads/\")\n with open(path, 'r') as d_f:\n text = d_f.read()\n os.remove(path)\n elif not cmd > 6 and replied and replied.text:\n text = replied.text\n if not text:\n await msg.edit(\"`input not found!`\")\n return\n await msg.edit(\"`Pasting text...`\")\n async with aiohttp.ClientSession() as ses:\n async with ses.post(NEKOBIN_URL + \"api/documents\", json={\"content\": text}) as resp:\n if resp.status == 201:\n response = await resp.json()\n key = response['result']['key']\n final_url = NEKOBIN_URL + key + file_ext\n reply_text = f\"**Nekobin** [URL]({final_url})\"\n await msg.edit(reply_text, disable_web_page_preview=True)\n else:\n await msg.edit(\"`Failed to reach Nekobin`\")\n\n\n\n@bot.on_message(filters.command(\"getpaste\") & cus_filters.auth_chats)\nasync def get_paste_(_, message: Message):\n \"\"\" fetches the content of a Nekobin URL \"\"\"\n if message.text and len(message.text) == 9:\n await message.reply(\"`input not found!`\")\n return\n _, args = message.text.split(maxsplit=1)\n link = args\n msg = await message.reply(\"`Getting paste content...`\")\n if link.startswith(NEKOBIN_URL):\n link = link[len(NEKOBIN_URL):]\n raw_link = f'{NEKOBIN_URL}raw/{link}'\n elif link.startswith(\"nekobin.com/\"):\n link = link[len(\"nekobin.com/\"):]\n raw_link = 
f'{NEKOBIN_URL}raw/{link}'\n else:\n await msg.edit(\"`Is that even a paste url?`\")\n return\n async with aiohttp.ClientSession(raise_for_status=True) as ses:\n try:\n async with ses.get(raw_link) as resp:\n text = await resp.text()\n except ServerTimeoutError as e_r:\n await msg.edit(f\"`Request timed out -> {e_r}`\")\n except TooManyRedirects as e_r:\n await msg.edit(\"`Request exceeded the configured `\"\n f\"`number of maximum redirections -> {e_r}`\")\n except ClientResponseError as e_r:\n await msg.edit(f\"`Request returned an unsuccessful status code -> {e_r}`\")\n else:\n if len(text) > Config.MAX_MSG_LENGTH:\n await msg.edit(\"`Content Too Large...`\")\n else:\n await msg.edit(\"--Fetched Content Successfully!--\"\n f\"\\n\\n**Content** :\\n`{text}`\")\n","repo_name":"UsergeTeam/Userge-Assistant","sub_path":"assistant/plugins/paste.py","file_name":"paste.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"52"} +{"seq_id":"39175541999","text":"from flask import Blueprint, g, escape, session, redirect, render_template, request, jsonify, Response\nfrom app import DAO\n\nfrom Controllers.UserManager import UserManager\nfrom Controllers.GameManager import GameManager\nimport mysql.connector\n\ngame_view = Blueprint('game_routes', __name__, template_folder='/templates')\n\ngame_manager = GameManager(DAO)\nuser_manager = UserManager(DAO)\nb = {}\n\n@game_view.route('/games/', defaults={'id': None})\n@game_view.route('/games/')\ndef home(id):\n\tuser_manager.user.set_session(session, g)\n\n\tif id != None:\n\t\tb = game_manager.getGame(id)\n\n\t\tprint('----------------------------')\n\t\tprint(b)\n\n\t\tuser_games={}\n\t\tif user_manager.user.isLoggedIn():\n\t\t\tuser_games = game_manager.getReserverdGamesByUser(user_id=user_manager.user.uid())['user_games'].split(',')\n\t\t\n\t\tif b and len(b) <1:\n\t\t\treturn render_template('game_view.html', error=\"No game found!\")\n\n\t\treturn render_template(\"game_view.html\", games=b, g=g, user_games=user_games)\n\telse:\n\t\tb = game_manager.list()\n\n\t\tuser_games=[]\n\t\tif user_manager.user.isLoggedIn():\n\t\t\treserved_games = game_manager.getReserverdGamesByUser(user_id=user_manager.user.uid())\n\t\t\t\n\t\t\tif reserved_games is not None:\n\t\t\t\tuser_games = reserved_games['user_games'].split(',')\n\t\t\n\t\tprint(\"---------------------------------------\")\n\t\t# print(\"USER GAME\", user_games)\n\t\t# print(\"Games\", b)\n\t\tif b and len(b) <1:\n\t\t\treturn render_template('games.html', error=\"No games found!\")\n\t\treturn render_template(\"games.html\", games=b, g=g, user_games=user_games)\n\n\n\treturn render_template(\"games.html\", games=b, g=g)\n\ndef checkIfGameDupli(user_id, game_id):\n\tmydb = mysql.connector.connect(\n\thost=\"localhost\",\n\tuser=\"root\",\n\tpassword=\"\",\n\tdatabase=\"gamesdb\"\n\t)\n\n\tmycursor = mydb.cursor()\n\tmycursor.execute(\"SELECT 1 FROM reserve WHERE user_id = '{}' AND game_id = '{}';\".format(user_id, game_id))\n\tmyresult = mycursor.fetchall()\n\t# print(\"EXIST?\", myresult)\n\tif myresult:\n\t\treturn True\n\telse:\n\t\treturn False\n\n\n@game_view.route('/games/add/', methods=['GET'])\n@user_manager.user.login_required\ndef add(id):\n\tglobal b\n\tuser_id = user_manager.user.uid()\n\texists = checkIfGameDupli(user_id, id)\n\tif exists:\n\t\tb = game_manager.list()\n\t\tuser_manager.user.set_session(session, g)\n\t\t\n\t\treturn render_template(\"games.html\", msg=\"Game already added\", 
games=b, g=g)\n\telse:\n\t\tgame_manager.reserve(user_id, id)\n\n\t\tb = game_manager.list()\n\t\tuser_manager.user.set_session(session, g)\n\t\t\n\t\treturn render_template(\"games.html\", msg=\"Game added\", games=b, g=g)\n\n\n@game_view.route('/games/search', methods=['GET'])\ndef search():\n\tuser_manager.user.set_session(session, g)\n\n\tif \"keyword\" not in request.args:\n\t\treturn render_template(\"search.html\")\n\n\tkeyword = request.args[\"keyword\"]\n\n\tif len(keyword)<1:\n\t\treturn redirect('/games')\n\n\td=game_manager.search(keyword)\n\n\tif len(d) >0:\n\t\tfor game in d:\n\t\t\tgameId = game['appid']\n\t\t\t# # URL of the Steam game page\n\t\t\t# steam_game_url = 'https://store.steampowered.com/app/' + str(gameId)\n\n\t\t\t# # Send a GET request to the URL\n\t\t\t# response = requests.get(steam_game_url)\n\n\t\t\t# # Parse the HTML content with Beautiful Soup\n\t\t\t# soup = BeautifulSoup(response.content, 'html.parser')\n\n\t\t\t# # Find the image element with the class \"game_header_image_full\"\n\t\t\t# image_element = soup.find(\"img\", class_=\"game_header_image_full\")\n\n\t\t\t# # Get the 'src' attribute from the image element\n\t\t\t# if image_element:\n\t\t\t# \timage_source = image_element['src']\n\t\t\t# \tprint(\"Image Source:\", image_source)\n\t\t\t# else:\n\t\t\t# \tprint(\"Image source not found.\")\n\t\treturn render_template(\"games.html\", search=True, games=d, count=len(d), keyword=escape(keyword), g=g)\n\n\treturn render_template('games.html', error=\"No games found!\", keyword=escape(keyword))","repo_name":"PasionJP/Educational-Video-Game-Recommendation-System","sub_path":"routes/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12741495730","text":"from django.urls import path\nfrom . 
import views\n\napp_name = \"blogs\"\n\nurlpatterns = [\n path('', views.index_page, name='index'),\n path('list', views.list_page, name='list'),\n path('blog', views.blog_page, name='blog'),\n path('blog/edit', views.blog_edit, name='edit'),\n path('blog/update', views.blog_update, name='update'),\n path('blog/create', views.blog_create, name='create')\n]\n","repo_name":"MrLiupython/myblog","sub_path":"blogs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72723929446","text":"from PyQt4 import QtGui, QtCore\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\n\nimport re\nimport sys\n\nfrom lxml import etree as et\nfrom collections import Counter, deque\nimport logging\n\n\n# from model.PlaceItem import PlaceItem\n# from model.TransitionItem import TransitionItem\n# from model.TokenItem import TokenItem\n# from model.ArcItem import ArcItem\n# from model.PortItem import PortItem\n# from model.VisualNodes import *\n\n\nclass XMLIO(object):\n '''XML Input and Output'''\n def __init__(self, simulator, rootElementName=\"\"):\n '''Init XML parser.\n \n :param simulator: Simulator to import to or export from.\n :param rootElementName: XML root element.\n '''\n self.simulator = simulator\n if str( rootElementName ) != \"\":\n self.cpn = et.Element(rootElementName)\n self.cpn.text = str(simulator.net)\n #------------------------------------------------------------------------------------------------\n \n def loadNet(self, filename):\n '''Parse XML file and prepare data for object creation.\n \n :param filename: Filepath to XML file which shall be loaded.\n :return [ netName, subnets, serConnections, serPlaces, serTransitions ]: A list containing lists for object creation.\n '''\n \n parser = et.XMLParser(dtd_validation=False)\n serPlaces = []\n serTransitions = [] \n serConnections = []\n subnets = []\n lastConnection = None\n netName = \"\"\n \n try:\n foundPlace = False\n foundTransition = False\n foundConnection = False\n foundPos = False\n foundInitMark = False\n foundLog = False\n initMark = [] \n \n cpnTree = et.parse(filename, parser) \n \n place = None\n \n actualSubnet = \"\"\n\n for idx, element in enumerate( cpnTree.getroot().iter(\"*\") ) : \n# print(element, element.tag, element.text)\n if lastConnection is not None:\n serConnections.append( [actualSubnet, lastConnection] ) \n lastConnection = None\n \n if idx is 0:\n netName = element.text \n \n if str( element.tag ) == str(\"SubnetName\"):\n actualSubnet = element.text\n \n subnets.append( actualSubnet )\n \n if str( element.tag ) == str(\"Place\"):\n if not foundPlace: \n foundPlace = True\n place = [] \n else: \n foundInitMark = False\n place.append(initMark)\n serPlaces.append( [actualSubnet, place] )\n place = [] \n \n if str( element.tag ) == str(\"Transition\"):\n if not foundPlace: \n if not foundTransition: \n foundTransition = True\n transition = [] \n else: \n serTransitions.append( [actualSubnet, transition] )\n transition = [] \n else: \n foundPlace = False\n foundInitMark = False\n place.append(initMark)\n serPlaces.append( [actualSubnet, place] )\n foundTransition = True\n transition = [] \n \n if str( element.tag ) == str(\"Connection\"):\n if not foundTransition: \n if not foundConnection: \n foundConnection = True\n connection = [] \n else: \n serConnections.append( [actualSubnet, connection] ) \n connection = [] \n else: \n foundTransition = False\n serTransitions.append( 
[actualSubnet, transition] )\n foundConnection = True\n connection = [] \n \n if \"uniqueName\" in element.tag:\n if foundConnection and not foundTransition and not foundPlace:\n if \"SRC\" in element.tag:\n connection.append( element.text )\n elif \"DST\" in element.tag:\n connection.append( element.text )\n else:\n if foundPlace and not foundTransition and not foundConnection:\n place.append( element.text )\n if foundTransition and not foundPlace and not foundConnection:\n transition.append( element.text )\n \n if str( element.tag ) == str(\"name\"):\n if foundPlace and not foundTransition and not foundConnection:\n place.append( element.text )\n if foundTransition and not foundPlace and not foundConnection:\n transition.append( element.text )\n if foundConnection and not foundPlace and not foundTransition:\n connection.append( element.text )\n \n if str( element.tag ) == str(\"sourceConnector\") and foundConnection:\n connection.append( element.text )\n \n if str( element.tag ) == str(\"destinationConnector\") and foundConnection:\n connection.append( element.text )\n lastConnection = connection\n foundConnection = False\n \n if str( element.tag ) == str(\"portClone\") and foundPlace:\n place.append( element.text )\n \n if str( element.tag ) == str(\"port\") and foundPlace:\n place.append( element.text )\n \n if str( element.tag ) == str(\"pos\"):\n pos = []\n foundPos = True\n if str( element.tag ) == str(\"x\") and foundPos:\n if foundPlace or foundTransition:\n pos.append( element.text )\n if str( element.tag ) == str(\"y\") and foundPos:\n if foundPlace or foundTransition:\n pos.append( element.text )\n if foundPlace:\n place.append(pos)\n if foundTransition:\n transition.append(pos)\n foundPos = False\n \n if str( element.tag ) == str(\"initMarking\") and foundPlace:\n foundInitMark = True\n initMark = [] \n if \"token\" in element.tag and foundInitMark and foundPlace:\n initMark.append( element.text ) \n \n if str( element.tag ) == str(\"guardExpression\") and foundTransition:\n transition.append( element.text )\n \n if str( element.tag ) == str(\"subnet\") and foundTransition:\n transition.append( element.text )\n \n if str( element.tag ) == str(\"Log\"):\n foundLog = True\n \n if not foundLog: \n serConnections.append( [actualSubnet, lastConnection] )\n return [ netName, subnets, serConnections, serPlaces, serTransitions ]\n \n except et.XMLSyntaxError:\n logging.debug( \"XML Parse Error %s\" %( str( sys.exc_info()[1] ) ) )\n return [ netName, subnets, serConnections, serPlaces, serTransitions ]\n \n #------------------------------------------------------------------------------------------------\n \n \n \n \n \n \n \n \n \n def netToXML(self, subnet, placesS, transitionsS, connectionsS):\n '''Save Colored Petrinet **Subnet** to XML tree. 
\n \n :param subnet: Subnet name.\n :param placesS: Place contained in `subnet`.\n :param transitionsS: Transitions contained in `subnet`.\n :param connectionsS: Connections contained in `subnet`.\n '''\n \n subnetName = et.SubElement(self.cpn, \"SubnetName\")\n subnetName.text = str( subnet )\n \n serPlaces = [] \n \n for place in placesS: \n \n if place.portClone is None and str(place.portDirection) != str(\"None\") and (\"i\" in place.portDirection or \"o\" in place.portDirection):\n place.portClone = place.uniqueName\n marking = None\n else:\n if isinstance( place.initMarking, deque ):\n marking = list( place.initMarking )\n else: \n marking = place.initMarking \n place.portClone = None\n \n \n serPlaces.append( \n [\n place.name, \n [place.scenePos().x(), place.scenePos().y()], \n marking,\n place.uniqueName,\n place.portDirection,\n place.portClone\n ]\n )\n \n serTransitions = [] \n for transition in transitionsS:\n serTransitions.append( \n [\n transition.name, \n [transition.scenePos().x(), transition.scenePos().y()], \n transition.guardExpression if not transition.substitutionTransition else \"None\",\n transition.uniqueName,\n transition.subnet if transition.substitutionTransition else \"None\"\n ]\n )\n \n serConnections = [] \n for connection in connectionsS:\n serConnections.append(\n [\n connection[0].parent.uniqueName,\n connection[2].parent.uniqueName,\n connection[1].name,\n connection[3],\n connection[4] \n ]\n )\n \n for p in serPlaces:\n place = et.SubElement(subnetName, \"Place\")\n uN = et.SubElement(place, \"uniqueName\")\n uN.text = p[3]\n na = et.SubElement(place, \"name\")\n na.text = p[0]\n nc = et.SubElement(place, \"portClone\")\n nc.text = str( p[5] )\n nc = et.SubElement(place, \"port\")\n nc.text = str( p[4] )\n pos = et.SubElement(place, \"pos\")\n posx = et.SubElement(pos, \"x\")\n posx.text = str(p[1][0])\n posy = et.SubElement(pos, \"y\")\n posy.text = str(p[1][1])\n inMark = et.SubElement(place, \"initMarking\")\n if isinstance(p[2], list):\n for idx, iM in enumerate(p[2]):\n if str( iM ) != str(\"None\") and iM is not None:\n _iM = et.SubElement(inMark, \"token%d\"%idx)\n _iM.text = str(iM)\n else:\n if p[2] is not None:\n _iM = et.SubElement(inMark, \"token0\")\n _iM.text = str(p[2]) \n \n for t in serTransitions:\n transition = et.SubElement(subnetName, \"Transition\")\n uN = et.SubElement(transition, \"uniqueName\")\n uN.text = t[3]\n na = et.SubElement(transition, \"name\")\n na.text = t[0]\n pos = et.SubElement(transition, \"pos\")\n posx = et.SubElement(pos, \"x\")\n posx.text = str(t[1][0])\n posy = et.SubElement(pos, \"y\")\n posy.text = str(t[1][1])\n guEx = et.SubElement(transition, \"guardExpression\")\n guEx.text = t[2] \n guEx = et.SubElement(transition, \"subnet\")\n guEx.text = str( t[4] )\n \n for c in serConnections:\n connection = et.SubElement(subnetName, \"Connection\")\n src = et.SubElement(connection, \"uniqueNameSRC\")\n src.text = c[0]\n dst = et.SubElement(connection, \"uniqueNameDST\")\n dst.text = c[1]\n na = et.SubElement(connection, \"name\")\n na.text = c[2]\n na = et.SubElement(connection, \"sourceConnector\")\n na.text = \"%d\"%c[3]\n na = et.SubElement(connection, \"destinationConnector\")\n na.text = \"%d\"%c[4] \n #------------------------------------------------------------------------------------------------\n \n def saveLog(self, logList):\n '''Save Log Entries to XML tree.\n \n :param logList: List of log entries from the log widget.\n '''\n \n subnetName = et.SubElement(self.cpn, \"Log\")\n for logItem in 
logList:\n na = et.SubElement(subnetName, \"Line\")\n na.text = logItem\n #------------------------------------------------------------------------------------------------\n \n \n def saveNet(self, filename): \n '''Save XML tree to file.\n \n :param filename: Filepath where the XML tree is saved. \n '''\n doctype = \"\"\"\"\"\" \n with et.xmlfile(filename, encoding='utf-8') as xf:\n xf.write_declaration(standalone=False)\n xf.write_doctype(doctype)\n tree = et.ElementTree(self.cpn)\n xf.write(tree.getroot())\n \n pass\n #------------------------------------------------------------------------------------------------","repo_name":"chris-kuhr/CPNSimulatorGui","sub_path":"src/inout/XMLIO.py","file_name":"XMLIO.py","file_ext":"py","file_size_in_byte":14790,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"52"} +{"seq_id":"39663521271","text":"from tkinter import messagebox\n\nimport customtkinter\nfrom tkinter import *\nfrom Person import Person\n\n\nclass AddPerson:\n def __init__(self, master):\n self.master = master\n self.itm = None\n self.window = customtkinter.CTkToplevel(master)\n self.window.title(\"Add Person\")\n self.window.geometry(\"250x60\")\n\n # Entry to enter name\n self.nameEntry = customtkinter.CTkEntry(self.window)\n self.nameEntry.grid(row=0, column=0, padx=5, pady=5, sticky=\"nswe\")\n\n # Enter Button\n self.insertButton = customtkinter.CTkButton(self.window, text=\"Insert\",\n fg_color=\"#bdbebe\",\n hover_color=\"#8d8e8d\",\n command=self.insertEvent,\n width=80)\n self.insertButton.grid(row=0, column=1, padx=5, pady=5, sticky=\"nswe\")\n self.window.bind(\"\", self.insertEvent)\n self.window.bind(\"\", self.destroy)\n\n def insertEvent(self, e=None):\n name = self.nameEntry.get()\n if len(name) == 0:\n response = messagebox.showinfo(\"info\", \"Name entry is empty\")\n return\n\n for i in self.master.people:\n if i.getName() == name:\n response = messagebox.showinfo(\"info\", \"Name already exist\")\n return\n\n self.master.people.append(Person(name))\n self.master.add_tab(name)\n\n self.window.destroy()\n\n def destroy(self, e=None):\n self.window.destroy()\n","repo_name":"brianlin725/BillSplit","sub_path":"subMenu/AddPerson.py","file_name":"AddPerson.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40146591472","text":"\"\"\" DSFM Illustration: Basis expansion\n -------------------------------------------------------------------------------------------------\n \n Creator: Data Science for Managers - EPFL Program - https://www.dsfm.ch\n Source: https://github.com/dsfm-org/code-bank.git\n License: MIT License (https://opensource.org/licenses/MIT) - see LICENSE in Code Bank repository. 
\n \n Sections of code adapted from: https://matplotlib.org/gallery/widgets/slider_demo.html\n\n    OVERVIEW:\n    \n    The following module executes much of the same illustration as the expected-value notebook, \n    but allows for interactive setting of the cost structure of False Positives and False Negatives.\n\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider, Button\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\n\n# =================\n\nC_FN_0 = 4\nC_FP_0 = 2\n\n# =================\n\n# load data\ndata = pd.read_csv('data/credit_data.csv')\n\n# Select target\ny = np.array(data['customer_default'])\n\n# Select features\nfeatures = list(set(list(data.columns)) - set(['customer_default']))\nX = data.loc[:, features]\n\n# Divide data into a training set and a testing set using the train_test_split() function\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state=1, stratify=y)\n\n# Fit an OLS linear regression\nols_model = LinearRegression()\nols_model.fit(X_train, y_train)\ny_hat_ols_prob = ols_model.predict(X_test) # Predict the probability\n\n# =================\n\n# Initial graph\nresults = []\nfor i in range(1, 50):\n    threshold = 0.02 * i\n    y_hats = [int(v >= threshold) for v in y_hat_ols_prob]\n    errors = []\n    for r in zip(y_test, y_hats):\n        actual_value = r[0]\n        predicted_value = r[1]\n        # Incorrect prediction\n        if predicted_value != actual_value:\n\n            # False positive\n            if predicted_value:\n                errors.append(C_FP_0)\n\n            # False negative\n            else:\n                errors.append(C_FN_0)\n        # Correct prediction\n        else:\n            errors.append(0)\n    total_error = sum(errors)\n    results.append((total_error, threshold))\noptimal_p = sorted(results)[0][1]\noptimal_acc = sorted(results)[0][0]\n\nprint('Optimal probability threshold = {} with costs = {}\\n'.format(round(optimal_p, 4), round(optimal_acc, 4)))\n\n# =================\n# Plot initial graph\ny, x = zip(*results)\n\nfig, ax = plt.subplots(figsize = (7, 8))\nplt.subplots_adjust(left=0.25, bottom=0.35)\nax.set_ylabel('Weighted Count of Error')\nax.set_xlabel('Probability Threshold')\nl, = plt.plot(x, y, lw=2)\nplt.grid()\nax.margins(x=0.1, y = 0.0001*max(y))\n\naxcolor = 'white'\naxFP = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)\naxFN = plt.axes([0.25, 0.15, 0.65, 0.03], facecolor=axcolor)\n\nc_fn = Slider(axFN, 'Costs False Negative', 1, 15.0, valinit=C_FN_0)\nc_fp = Slider(axFP, 'Costs False Positive', 1, 10.0, valinit=C_FP_0)\n\n\ndef update(val):\n\n    fn = c_fn.val\n    fp = c_fp.val\n\n    results = []\n    for i in range(1, 50):\n        threshold = 0.02 * i\n        y_hats = [int(v >= threshold) for v in y_hat_ols_prob]\n        errors = []\n        for r in zip(y_test, y_hats):\n            actual_value = r[0]\n            predicted_value = r[1]\n            # Incorrect prediction\n            if predicted_value != actual_value:\n\n                # False positive\n                if predicted_value:\n                    errors.append(fp)\n\n                # False negative\n                else:\n                    errors.append(fn)\n            # Correct prediction\n            else:\n                errors.append(0)\n        total_error = sum(errors)\n        results.append((total_error, threshold))\n    optimal_p = sorted(results)[0][1]\n    optimal_costs = sorted(results)[0][0]\n\n    print('Optimal probability threshold = {} \\twith costs = {}'.format(round(optimal_p, 4), round(optimal_costs, 1)))\n\n    y, x = zip(*results)\n    l.set_ydata(y)\n    fig.canvas.draw_idle()\n\n\nc_fp.on_changed(update)\nc_fn.on_changed(update)\n\nresetax = plt.axes([0.8, 0.025, 0.1, 0.04])\nbutton = Button(resetax, 'Reset', color=axcolor, 
hovercolor='0.975')\n\ndef reset(event):\n    c_fp.reset()\n    c_fn.reset()\nbutton.on_clicked(reset)\n\nplt.show()\n","repo_name":"jbesomi/code-bank","sub_path":"illustrations/expected-value/expected-value.py","file_name":"expected-value.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36754630673","text":"#programmers: Jonathan, Nick, Craig\n#date: 11 11 2021\n#this program holds a conversation with Jonathan's wife\n\n#initial variables\n#facts\n\n\nwife = \"peyton\"\nage = 25\noccupation = \"nurse\"\nexclamation = \"fuck your coworkers!\"\n\n#emotions\n\n#say hi to user\nprint(\"This program holds a conversation with Jonathan's wife\")\n#ask user for name\nname = str(input(\"Hi! What is your name?\"))\n#confirm user name\nif name.lower() == wife:\n    print(\"You are Peyton!\")\nelse:\n    print(\"you are not peyton\") \n\n#get user age\ngivenAge = int(input(\"What is your age?\"))\n#confirm age\nif givenAge == age:\n    print(\"Very cool, enjoy your 20s\")\nelif givenAge > age:\n    print(\"You're not that old, you liar\")\nelse:\n    print(\"Nice try, you old fart\")\n\n#get user occupation\ngivenOccupation = str(input(\"What do you do for a living?\"))\n\nif givenOccupation.lower() == occupation:\n    print(\"That's great, I hope you like it\")\nelif givenOccupation.lower() == \"nursing\":\n    print(\"That's great, I hope you like it\")\nelse:\n    print(\"Nice try, you liar\")\n\n#after asking what she does for work, ask if she worked today\n#if she did, ask her if it went well, yes or no\n#if no, console her\n\n#have it ask how she is doing in general","repo_name":"njclark1/SchoolCode","sub_path":"Python/parrot.py","file_name":"parrot.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33676990920","text":"from peewee import *\nfrom datetime import date\nfrom config.dbConfig import Base\n\n\nclass Notification(Base):\n    id = AutoField()\n    uid = IntegerField()\n    username = CharField()\n    pid = IntegerField(default=None)\n    title = CharField(default='')\n    date = CharField(default=date.today().strftime('%d/%m/%Y'))\n    category = CharField()\n    user = IntegerField()\n\n    class Meta:\n        database = Base._meta.db\n        table_name = 'Notification'\n","repo_name":"TenshiRachel/flask-outsource","sub_path":"models/notifications.py","file_name":"notifications.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33025491750","text":"import cmath\nfrom PIL import Image\n\n\nPHI_D = float(input(\"input angle in terms of pi:\"))\nMAG = float(input(\"input magnitude:\"))\nJULIA = cmath.rect(MAG,PHI_D)\nscreen = (int(input(\"x size:\")),int(input(\"y size:\")))\ntrap = Image.open(input(\"filename:\"))\ncorners = [0,0,0,0]\ntrap_back = Image.new('RGB',screen)\nxp = int(input(\"x image offset:\"))\nyp= int(input(\"y image offset:\"))\ntrap_back.paste(trap,(xp,yp))\ncorners[0] =xp/(screen[0]/2)-1 \ncorners[1] = (xp+trap.size[0])/(screen[0]/2)-1\ncorners[2] = yp/(screen[1]/2)-1\ncorners[3] = (yp+trap.size[1])/(screen[1]/2)-1\nMAX_ITER = int(input(\"Iterations: \"))\n\ncanvas = Image.new('RGB',screen)\ncount = 0\nperc = 0\nfor x in range(0,screen[0]):\n    for y in range(0,screen[1]):\n        num = (x/(screen[0]/2)-1,y/(screen[1]/2)-1)\n        z = complex(num[0],num[1])\n        color = (0,0,0)\n        hit = False\n        for i in range(0,MAX_ITER):\n            count = i\n            z = 
pow(z,2) + JULIA\n if z.real >corners[0] and z.real corners[2] and z.imag 2:\n color = (0,0,0)\n break\n if abs(z) > 2:\n canvas.putpixel((x,y),color)\n else:\n canvas.putpixel((x,y),color)\n perc +=1\n if perc == 20:\n print(str(x/screen[0]*100)+\"%\")\n perc = 0\ncanvas.show()\n \n \n","repo_name":"zipy124/orbital-trap","sub_path":"julia.py","file_name":"julia.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37286091198","text":"#\n# @lc app=leetcode id=685 lang=python3\n#\n# [685] Redundant Connection II\n#\n# https://leetcode.com/problems/redundant-connection-ii/description/\n#\n# algorithms\n# Hard (30.92%)\n# Likes: 488\n# Dislikes: 159\n# Total Accepted: 24.4K\n# Total Submissions: 78.6K\n# Testcase Example: '[[1,2],[1,3],[2,3]]'\n#\n# \n# In this problem, a rooted tree is a directed graph such that, there is\n# exactly one node (the root) for which all other nodes are descendants of this\n# node, plus every node has exactly one parent, except for the root node which\n# has no parents.\n# \n# The given input is a directed graph that started as a rooted tree with N\n# nodes (with distinct values 1, 2, ..., N), with one additional directed edge\n# added. The added edge has two different vertices chosen from 1 to N, and was\n# not an edge that already existed.\n# \n# The resulting graph is given as a 2D-array of edges. Each element of edges\n# is a pair [u, v] that represents a directed edge connecting nodes u and v,\n# where u is a parent of child v.\n# \n# Return an edge that can be removed so that the resulting graph is a rooted\n# tree of N nodes. If there are multiple answers, return the answer that\n# occurs last in the given 2D-array.\n# Example 1:\n# \n# Input: [[1,2], [1,3], [2,3]]\n# Output: [2,3]\n# Explanation: The given directed graph will be like this:\n# ⁠ 1\n# ⁠/ \\\n# v v\n# 2-->3\n# \n# \n# Example 2:\n# \n# Input: [[1,2], [2,3], [3,4], [4,1], [1,5]]\n# Output: [4,1]\n# Explanation: The given directed graph will be like this:\n# 5 2\n# ⁠ ^ |\n# ⁠ | v\n# ⁠ 4 \n# \n# Note:\n# The size of the input 2D-array will be between 3 and 1000.\n# Every integer represented in the 2D-array will be between 1 and N, where N is\n# the size of the input array.\n# \n#\nclass DSU:\n def __init__(self, N):\n self.ranks = [0] * (N+1)\n self.groups = list(range(N+1))\n \n def find(self, x):\n if self.groups[x] == x:\n return x\n return self.find(self.groups[x])\n \n def union(self, x, y):\n gx = self.find(x)\n gy = self.find(y)\n if gx == gy: \n return False\n if self.ranks[gx] > self.ranks[gy]:\n self.groups[gy] = gx\n elif self.ranks[gx] < self.ranks[gy]:\n self.groups[gx] = gy\n else:\n self.groups[gy] = gx\n self.ranks[gy] += 1\n return True\n\nclass Solution:\n def findRedundantDirectedConnection(self, edges: List[List[int]]) -> List[int]:\n\n def is_cycle(edge):\n # return True if from edge=x, y can get back to x\n x, y = edge\n while x != y and x in parent:\n x = parent[x]\n return x == y\n \n parent = {}\n candidates = []\n for x, y in edges:\n if y not in parent:\n parent[y] = x\n else:\n candidates.append([parent[y], y])\n candidates.append([x, y])\n \n if candidates:\n if is_cycle(candidates[0]):\n return candidates[0]\n return candidates[1]\n \n else:\n N = len(edges)\n dsu = DSU(N)\n for x, y in edges:\n if not dsu.union(x, y):\n return [x, y]\n return 
[]\n\n","repo_name":"chenxu0602/LeetCode","sub_path":"685.redundant-connection-ii.py","file_name":"685.redundant-connection-ii.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"74818383844","text":"from config.config_constants import SCRIPT_FOLDER, REMOTE_CONTROLLER_IP, REMOTE_CONTROLLER_PORT\nfrom mininet_ns_script_template import gen_mn_ns_script_by_template, gen_mn_ns_script_by_template_with_custom_host_ip\nfrom host_configurator import define_node_ip_pool, get_next_IP\n\n\n\n\ndef gen_turn_on_script_by_template(file, nodes_ext_intf, node_group, edge_group, ext_intf_list, leaves, node_ctrl_map):\n '''Generate turn on script for Cluster node.\n\n Args:\n file: File discriptor of future turn on script.\n nodes_ext_intf: Cluster node external network interface name.\n node_group: Group ID to node-list map.\n edge_group: Group ID to edge-list map.\n ext_intf_list: External network insterface name to the node group.\n leaves: List of leave-node in network graph.\n '''\n file.write('import re\\n')\n\n file.write('from mininet.cli import CLI\\n')\n file.write('from mininet.log import setLogLevel, info, error\\n')\n file.write('from mininet.net import Mininet\\n')\n file.write('from mininet.link import Intf\\n')\n file.write('from mininet.topo import Topo\\n')\n file.write('from mininet.util import quietRun\\n')\n file.write('from mininet.node import RemoteController\\n')\n file.write('\\n')\n file.write('sw_ext_intf = [')\n for i, node in enumerate(ext_intf_list):\n file.write('\\'s')\n file.write(str(node))\n file.write('\\'')\n if i != len(ext_intf_list)-1:\n file.write(',')\n file.write(']')\n file.write('\\n')\n file.write('\\n')\n file.write('class MyTopo( Topo ):\\n')\n file.write(' \\\"Auto generated topology for this Mininet Node\\\"\\n')\n file.write(' def __init__( self ):\\n')\n file.write(' Topo.__init__( self )\\n')\n file.write('\\n')\n file.write(' \\\"Add hosts and swiches\\\"\\n')\n for node in node_group:\n if node in leaves:\n file.write(' h')\n file.write(str(node))\n file.write(' = self.addHost( \\'h')\n file.write(str(node))\n file.write('\\' )\\n')\n else:\n file.write(' s')\n file.write(str(node))\n file.write(' = self.addSwitch( \\'s')\n file.write(str(node))\n file.write('\\' )\\n')\n file.write('\\n')\n file.write(' \\\"Add links\\\"\\n')\n for edge in edge_group:\n file.write(' self.addLink( ')\n if edge[0] in leaves:\n file.write('h')\n else:\n file.write('s')\n file.write(str(edge[0]))\n file.write(', ')\n if edge[1] in leaves:\n file.write('h')\n else:\n file.write('s')\n file.write(str(edge[1]))\n file.write(' )\\n')\n file.write('\\n')\n file.write('\\n')\n file.write('def checkIntf( intf ):\\n')\n file.write(' \"Make sure intf exists and is not configured.\"\\n')\n file.write(' if ( \\' %s:\\' % intf ) not in quietRun( \\'ip link show\\' ):\\n')\n file.write(' error( \\'Error:\\', intf, \\'does not exist!\\\\n\\' )\\n')\n file.write(' exit( 1 )\\n')\n file.write(' ips = re.findall( r\\'\\d+\\.\\d+\\.\\d+\\.\\d+\\', quietRun( \\'ifconfig \\' + intf ) )\\n')\n file.write(' if ips:\\n')\n file.write(' error( \\'Error:\\', intf, \\'has an IP address,\\'\\n')\n file.write(' \\'and is probably in use!\\\\n\\' )\\n')\n file.write(' exit( 1 )\\n')\n file.write('\\n')\n file.write('if __name__ == \\'__main__\\':\\n')\n file.write(' setLogLevel( \\'info\\' )\\n')\n file.write('\\n')\n file.write(' intfName = \\'')\n file.write(nodes_ext_intf)\n 
file.write('\\'\\n')\n file.write(' info( \\'*** Checking\\', intfName, \\'\\\\n\\' )\\n')\n file.write(' checkIntf( intfName )\\n')\n file.write('\\n')\n file.write(' info( \\'*** Creating network\\\\n\\' )\\n')\n file.write(' net = Mininet( topo=MyTopo(), controller=lambda name: RemoteController( name,ip=\\'')\n file.write(node_ctrl_map[0])\n file.write('\\',port=int(\\'')\n file.write(node_ctrl_map[1])\n file.write('\\') ) )\\n')\n file.write('\\n')\n file.write(' for sw in net.switches:\\n')\n file.write(' if sw.name in sw_ext_intf:\\n')\n file.write(' info( \\'*** Adding hardware interface\\', intfName, \\'to switch\\',\\n')\n file.write(' sw.name, \\'\\\\n\\' )\\n')\n file.write(' _intf = Intf( intfName, node=sw )\\n')\n file.write('\\n')\n file.write(' info( \\'*** Note: you may need to reconfigure the interfaces for \\'\\n')\n file.write(' \\'the Mininet hosts:\\\\n\\', net.hosts, \\'\\\\n\\' )\\n')\n file.write('\\n')\n file.write(' net.start()\\n')\n file.write(' CLI( net )\\n')\n file.write(' net.stop()\\n')\n\n\ndef generate_mininet_turn_on_script_auto(node_intf_map, node_groups, edge_groups, node_ext_intf_group, leaves,\n node_map, node_ctrl_map):\n '''Generate turn on script for Cluster node.\n\n Args:\n node_ext_intf: External network insterface name to the node.\n node_group: Group ID to node-list map.\n edge_group: Group ID to edge-list map.\n node_ext_insf_group: External network insterface name to the node group.\n leaves: List of leave-node in network graph.\n node_map: Cluster node map.\n '''\n for group in node_groups.keys():\n if group != 'ext_intf':\n node_IP = node_map.keys()[group]\n filename = 'turn_on_script_for_' + node_IP + '.py'\n filepath = SCRIPT_FOLDER + filename\n\n file = open(filepath, 'w')\n\n gen_turn_on_script_by_template(file, node_intf_map[node_IP], node_groups[group], edge_groups[group],\n node_ext_intf_group, leaves, node_ctrl_map[node_IP])\n file.close()\n\n\n\n\ndef generate_mn_ns_script_auto(node_intf_map, node_groups, edge_groups, node_ext_intf_group, leaves,\n node_map, node_ctrl_map, hosts_net_services):\n '''Generate turn on script for Cluster node.\n\n Args:\n node_ext_intf: External network insterface name to the node.\n node_group: Group ID to node-list map.\n edge_group: Group ID to edge-list map.\n node_ext_insf_group: External network insterface name to the node group.\n leaves: List of leave-node in network graph.\n node_map: Cluster node map.\n '''\n for group in node_groups.keys():\n if group != 'ext_intf':\n node_IP = node_map.keys()[group]\n filename = 'turn_on_script_for_' + node_IP + '.py'\n filepath = SCRIPT_FOLDER + filename\n\n file = open(filepath, 'w')\n\n gen_mn_ns_script_by_template(file, node_intf_map[node_IP], node_groups[group], edge_groups[group],\n node_ext_intf_group, leaves, node_ctrl_map[node_IP], hosts_net_services)\n file.close()\n\n\ndef generate_mn_ns_script_with_custom_host_ip_auto(nodes, groups, leaves, hosts_net_services):\n '''Generate turn on script for Cluster node.\n\n Args:\n\n '''\n hosts = {}\n\n nodes = define_node_ip_pool(groups, leaves, nodes)\n\n for ip, node in nodes.items():\n group = groups[node['group']]\n curr_host_ip = node['IP_pool']\n for node_in_gr in group['vertexes']:\n if node_in_gr in leaves:\n curr_host = 'h' + str(node_in_gr)\n host = {}\n host['IP'] = curr_host_ip\n host['name'] = curr_host\n host['IP_node'] = ip\n\n hosts[curr_host] = host\n # prepare for next host\n curr_host_ip = get_next_IP(curr_host_ip)\n\n filename = 'turn_on_script_for_' + ip + '.py'\n filepath = 
SCRIPT_FOLDER + filename\n\n file = open(filepath, 'w')\n\n spec_group = groups['no_group']\n\n gen_mn_ns_script_by_template_with_custom_host_ip(file, node, group, spec_group, leaves,\n hosts_net_services, hosts)\n file.close()\n\n\n return hosts\n\n\nif __name__ == '__main__':\n pass\n\n","repo_name":"ARCCN/nps","sub_path":"src/mininettools/mininet_script_generator.py","file_name":"mininet_script_generator.py","file_ext":"py","file_size_in_byte":8107,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"33998967690","text":"\"\"\"Tests for HPfilter wrapper annotation estimator.\"\"\"\n\n__author__ = [\"ken-maeda\"]\n\nimport pandas as pd\nimport pytest\nfrom numpy import array_equal\n\nfrom sktime.utils.validation._dependencies import _check_soft_dependencies\n\n\n@pytest.mark.skipif(\n not _check_soft_dependencies(\"statsmodels.api\", severity=\"none\"),\n reason=\"skip test if required soft dependency for statsmodels.api not available\",\n)\ndef test_HPFilter_wrapper():\n \"\"\"Verify that the wrapped HPFilter estimator agrees with statsmodel.\"\"\"\n # moved all potential soft dependency import inside the test:\n\n import statsmodels.api as sm\n\n from sktime.transformations.series.hpfilter import HPFilter as _HPFilter\n\n dta = sm.datasets.macrodata.load_pandas().data\n index = pd.date_range(start=\"1959Q1\", end=\"2009Q4\", freq=\"Q\")\n dta.set_index(index, inplace=True)\n sm_cycle = sm.tsa.filters.hpfilter(dta[[\"realinv\"]], 1600)[0]\n sm_cycle = pd.DataFrame(sm_cycle)\n hp = _HPFilter(lamb=1600)\n sk_cycle = hp.fit_transform(X=dta[[\"realinv\"]])\n assert array_equal(sm_cycle, sk_cycle)\n","repo_name":"sktime/sktime","sub_path":"sktime/transformations/tests/test_hpfilter.py","file_name":"test_hpfilter.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":7028,"dataset":"github-code","pt":"52"} +{"seq_id":"72625487204","text":"'''11) Escreva uma função limites(l) que recebe uma lista l de números e retorna uma tupla\n(min,max), onde min representa o menor dos elementos da lista e max o maior dos elementos da\nlista. 
Não utilize funções prontas para determinar o menor ou maior elemento da lista.\n'''\n\ndef limites(l):\n min = l[0]\n max = l[0]\n for i in range(len(l)):\n if l[i] < min:\n min = l[i]\n if l[i] > max:\n max = l[i]\n final = (min, max)\n print(final)\n\nl = [1, 7, 8, 9]\nlimites(l)","repo_name":"ddanieloliver/Python","sub_path":"Exercícios/Lista 5/Ex11.py","file_name":"Ex11.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9192374289","text":"import pickle\nimport os\nimport numpy as np\nfrom nmf_train import nmf_train\nfrom globals import *\nfrom combine_sounds import combine_sounds\n\nimport librosa\n\n\nroot = \"dev_dataset/dcase2016_task2_train\"\neffect_list = [name for name in os.listdir(root)]\n\ntrained_matrix = np.random.rand(\n int(WINDOW_LENGTH / 2 + 1), COMPONENTS_PER_EFFECT * (len(effect_list) + 1))\n\n# Training on sounds in training_sounds\nroot = \"training_sounds\"\nlist = [os.listdir(root)[1]] + [os.listdir(root)[5]] + [os.listdir(root)[9]]\nprint(list)\ntrained_matrix = np.random.rand(\n int(WINDOW_LENGTH / 2 + 1), COMPONENTS_PER_EFFECT * (len(list) + 1))\nfor i in range(len(list)):\n\tx = list[i]\n\tpath = os.path.join(root, x)\n\tprint(path)\n\tcomponents, activations = nmf_train(path, COMPONENTS_PER_EFFECT)\n\n\ttrained_matrix[:, i * COMPONENTS_PER_EFFECT:(i + 1) * COMPONENTS_PER_EFFECT] = components\n\ncolormap(trained_matrix)\nf = open(\"trained_matrix_concatenated_sounds_small3.pkl\", \"wb\")\npickle.dump(trained_matrix, f)\nf.close()\n","repo_name":"maxschuman/nmf_auditory_scenes","sub_path":"build_model.py","file_name":"build_model.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"44008541083","text":"from .optimizations import find_best_for_n_steps, \\\n max_expected_length, collect_genotypes\nfrom .treatment import load_treatments_from_file\nfrom argparse import ArgumentParser\nimport sys\n\ndef bitstringconverter(l):\n 'Returns function for converting ints to '\\\n 'zero padded bitstrings'\n len = max(x.bit_length() for x in l)\n\n def tobitstr(n):\n return format(n,f'0{len}b')\n\n return tobitstr\n\nparser = ArgumentParser()\nparser.add_argument('file')\nparser.add_argument('pathlength',nargs='?')\nparser.add_argument('-d',dest='d',type=float,metavar=\"discount\",\n help=\"Discount Factor\")\nparser.add_argument('--discount',dest='d',type=float,metavar=\"discount\",\n help=\"Discount Factor\")\nparser.add_argument('-e','--expected',dest='e',action='store_true',\n help=\"expected reward flag\")\nargs = parser.parse_args()\nfile,length = args.file,args.pathlength\n\ntl = load_treatments_from_file(file)\n\ngenotypes = collect_genotypes(tl)\ntobs = bitstringconverter(genotypes)\n\nif args.e:\n if args.d is None:\n discount = 0.9\n else:\n discount = args.d\n if args.pathlength is not None:\n print(\"Warning: Path length ignored with this option\",\n file = sys.stderr)\n res = max_expected_length(tl,discount)\n print('To maximize expected reward:')\n print('Genotype','Treatment','Reward')\n for g,(t,p) in res.items():\n print(f'{tobs(g):>8}',\\\n f'{t!s:>9}',\\\n f'{p:>11.5f}')\n\nif not args.e:\n if args.d is not None:\n print(\"Cannot have discount factor for n-step \"\\\n \"calculation\",file=sys.stderr)\n sys.exit(1)\n if args.pathlength is None:\n print(\"Must have path length for this option\",\n file=sys.stderr)\n sys.exit(1)\n res = 
find_best_for_n_steps(tl,args.pathlength)\n symb = \"oo\" if args.pathlength in [\"oo\",\"inf\"] \\\n else \">= \"+str(args.pathlength)\n print('To maximize probability of ' + symb + \\\n ' steps:')\n print('Genotype','Treatment','Probability')\n for g in genotypes:\n if hasattr(res[g][0],\"name\"):\n treat = res[g][0].name\n else:\n treat = 'None'\n prob = res[g][1]\n\n if args.pathlength in ['oo','inf']:\n prob = '--'\n print(f'{tobs(g):>8}',\n f'{treat:>9}',\n (format('--','>11s') if prob == '--' \n else f'{prob:>11.5f}'))\n","repo_name":"devingreene/drug_cycling","sub_path":"drug_cycling/fbt.py","file_name":"fbt.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37796360081","text":"\"\"\"Set up and run dustywave calculations.\n\nExactly as Benitez-Llambay et al. (2019).\n\nNeed to set the following variables:\n SIMULATION\n The simulation type; here it is 'dustybox'.\n PARAMETERS\n The parameters dictionary of dictionaries for each run.\n RUN_DIRECTORY\n The path to the directory to store the runs.\n PATCH_FILE\n An optional Phantom patch file.\n\nThe PARAMETERS variable is a dictionary of parameter dictionaries.\n\nThe dictionary is as follows:\n {\n 'name_of_run1': parameters1,\n 'name_of_run2': parameters2,\n ...\n }\n\nThe 'parameters' dictionary has keys with the name of the run, which\nwill be the name of its directory, and the values are the parameters\ndictionaries for that run.\n\nEach dictionary for each run needs the following keys:\n\n 'prefix'\n 'length_unit'\n 'mass_unit'\n 'time_unit'\n 'sound_speed'\n 'box_width'\n 'number_of_particles_in_x_gas'\n 'number_of_particles_in_x_dust'\n 'omega'\n 'density_gas'\n 'delta_density_gas'\n 'delta_v_gas'\n 'density_dust'\n 'delta_density_dust'\n 'delta_v_dust'\n 'K_drag'\n 'maximum_time'\n 'number_of_dumps'\n\"\"\"\n\nimport copy\nimport sys\nimport pathlib\n\npath = pathlib.Path(__file__).parent / '..' 
/ 'modules'\nsys.path.insert(0, str(path))\n\nfrom multigrain import run_script\n\nSIMULATION = None\nPARAMETERS = None\nRUN_DIRECTORY = None\nPATCH_FILE = None\n\n# ------------------------------------------------------------------------------------ #\n# MAKE CHANGES BELOW AS REQUIRED\n\nSIMULATION = 'dustywave'\nRUN_DIRECTORY = '~/runs/multigrain/dustywave'\n\n# Dictionary of parameters common to all runs.\n_parameters = {\n 'prefix': 'dustywave',\n 'length_unit': 1.0,\n 'mass_unit': 1.0,\n 'time_unit': 1.0,\n 'sound_speed': 1.0,\n 'box_width': 1.0,\n 'number_of_particles_in_x_gas': 128,\n 'number_of_particles_in_x_dust': 128,\n 'density_gas': 1.0,\n 'wave_amplitude': 1.0e-4,\n 'maximum_time': 2.0,\n 'number_of_dumps': 100,\n}\n\nPARAMETERS = dict()\n\n# One dust species\nd = copy.copy(_parameters)\nd['delta_density_gas'] = 1.0\nd['delta_v_gas'] = -0.701960 - 0.304924j\nd['omega'] = 1.915896 - 4.410541j\nd['density_dust'] = (2.24,)\nd['delta_density_dust'] = (0.165251 - 1.247801j,)\nd['delta_v_dust'] = (-0.221645 + 0.368534j,)\ntstop = (0.4,)\nd['K_drag'] = tuple([rho_d / ts for rho_d, ts in zip(d['density_dust'], tstop)])\n\nPARAMETERS['N_1'] = d\n\n# Four dust species\nd = copy.copy(_parameters)\nd['delta_density_gas'] = 1.0\nd['delta_v_gas'] = -0.874365 - 0.145215j\nd['omega'] = 0.912414 - 5.493800j\nd['density_dust'] = (0.1, 0.233333, 0.366667, 0.5)\nd['delta_density_dust'] = (\n 0.080588 - 0.048719j,\n 0.091607 - 0.134955j,\n 0.030927 - 0.136799j,\n 0.001451 - 0.090989j,\n)\nd['delta_v_dust'] = (\n -0.775380 + 0.308952j,\n -0.427268 + 0.448704j,\n -0.127928 + 0.313967j,\n -0.028963 + 0.158693j,\n)\ntstop = (0.1, 0.215443, 0.464159, 1.0)\nd['K_drag'] = tuple([rho_d / ts for rho_d, ts in zip(d['density_dust'], tstop)])\n\nPARAMETERS['N_4'] = d\n\n# ------------------------------------------------------------------------------------ #\n# DO NOT CHANGE BELOW\n\nrun_script(\n simulation_to_setup=SIMULATION,\n parameters_dict=PARAMETERS,\n run_directory=RUN_DIRECTORY,\n phantom_patch_file=PATCH_FILE,\n)\n","repo_name":"dmentipl/multigrain","sub_path":"code/scripts/dustywave_setup_and_run.py","file_name":"dustywave_setup_and_run.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"23277867959","text":"import entering as en\r\nimport my_functions as func\r\n\r\nk = 0\r\nshablon = input('Enter shablon')\r\nfunc_values = []\r\nen.x, en.x2, en.h, en.a, en.s = float(en.x), float(en.x2), float(en.h), float(en.a), float(en.s)\r\nwhile True:\r\n try:\r\n while en.x <= en.x2:\r\n if en.s == 1:\r\n try:\r\n func_values.append(func.g(en.x, en.a))\r\n en.x += en.h\r\n except ValueError:\r\n print('Error')\r\n elif en.s == 2:\r\n try:\r\n func_values.append(func.f(en.x, en.a))\r\n en.x += en.h\r\n except ValueError:\r\n print('Error')\r\n elif en.s == 3:\r\n try:\r\n func_values.append(func.y(en.x, en.a))\r\n en.x += en.h\r\n except ValueError:\r\n print('Error')\r\n print('; '.join(str(i) for i in func_values))\r\n for i in func_values:\r\n if str(i) == shablon:\r\n k += 1\r\n print(k)\r\n except KeyboardInterrupt:\r\n print('bye')\r\n ex = input('Do you wanna repeat?(y or n)')\r\n if ex == 'y':\r\n pass\r\n else:\r\n raise SystemExit()\r\n","repo_name":"haanaks/aip","sub_path":"lab05.py","file_name":"lab05.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14939257830","text":"import glob\nimport 
json\nimport os\nimport re\n\nPATH = \"/home/lr/kawamoto/m1/GradeSystem/essay/wi+locness/\"\nfiles = glob.glob(\n PATH+'json/*.json')\nfiles.sort()\n\n# jsonファイルからcefrランクを入手します\nprint(\"CEFR rank\")\ncefr_lsts = []\nfor file in files:\n ce_lst = []\n print(file)\n with open(file, \"r\") as lines_json:\n # df = json.load(f_json)\n # この場合decodeエラーが出てしまう\n # {\"text\":~~},のこのコロンがないため。\n # なので1行ずつ処理する\n for line in lines_json:\n df = json.loads(line)\n cefr = re.sub(\"\\...?|\\+\", \"\", df[\"cefr\"])\n ce_lst.append(cefr)\n cefr_lsts.append(ce_lst)\n print(len(ce_lst))\n\n# src文を入手\nprint(\"src\")\nfiles = glob.glob(\n PATH+'text/*.src')\nfiles.sort()\nsrc_lsts = []\n\nfor file in files:\n print(file)\n with open(file, \"r\") as f:\n s = f.read()\n sr_lst = s.strip().strip(\"@@@\").split(\"@@@\")\n src_lsts.append(sr_lst)\n print(len(sr_lst))\n\nprint(\"correct\")\n# correct文を入手\nfiles = glob.glob(\n PATH+'text/*.correct')\nfiles.sort()\n\ncorrect_lsts = []\nfor file in files:\n print(file)\n with open(file, \"r\") as f:\n s = f.read()\n cor_lst = s.strip().strip(\"@@@\").split(\"@@@\")\n correct_lsts.append(cor_lst)\n print(len(cor_lst))\n\nfor rank_lst, src_lst, cor_lst in zip(cefr_lsts, src_lsts, correct_lsts):\n for rank, src, cor in zip(rank_lst, src_lst, cor_lst):\n num = len(os.listdir(PATH+\"original/{}/\".format(rank)))\n with open(PATH+\"original/{}/{:04}.raw\".format(rank, num+1), \"w\") as f_ori:\n f_ori.write(src.strip())\n with open(PATH+\"correct/{}/{:04}.raw\".format(rank, num+1), \"w\") as f_cor:\n f_cor.write(cor.strip())\n\nprint(\"A1:\", len(os.listdir(PATH+\"original/A1/\")))\nprint(\"A2:\", len(os.listdir(PATH+\"original/A2/\")))\nprint(\"B1:\", len(os.listdir(PATH+\"original/B1/\")))\nprint(\"B2:\", len(os.listdir(PATH+\"original/B2/\")))\nprint(\"C1:\", len(os.listdir(PATH+\"original/C1/\")))\nprint(\"C2:\", len(os.listdir(PATH+\"original/C2/\")))\n","repo_name":"toshi835/GradeSystem","sub_path":"alignment/clean_wi+locness.py","file_name":"clean_wi+locness.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"15336907085","text":"import math\n\n\n# 定义一个函数\ndef my_abs(x):\n if x >= 0:\n return x\n else:\n return -x\n\nprint(my_abs(-440)) # => 440\n\n# 定义一个空函数\n# pass 占位符,可以先定义让代码先运行起来\n\n\ndef nop():\n pass\n\n\nprint(nop()) # => None\n\n# 对参数进行检查\n\n\ndef my_abs(x):\n if not isinstance(x, (int, float)):\n raise TypeError('错误的操作参数')\n if isinstance(x, bool):\n if x:\n return 1\n else:\n return 0\n if x >= 0:\n return x\n else:\n return -x\nprint(my_abs(True))\n\n# 函数返回多值\n\n\ndef move(x, y, step, angle=0):\n nx = x + step * math.cos(angle)\n ny = y + step * math.sin(angle)\n return nx, ny\n\nx, y = move(0, 0, 60, math.pi / 3)\nprint(x, y)\n","repo_name":"zenyuca/study-python3","sub_path":"syntax/function/Function.py","file_name":"Function.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"36602180303","text":"car = 'Hello'\n\ncars = [\n {\"make\": \"Ford\", \"model\": \"Fiesta\", \"mileage\": 23000, \"fuel_comsumed\": 460},\n {\"make\": \"Ford\", \"model\": \"Focus\", \"mileage\": 17000, \"fuel_comsumed\": 350},\n {\"make\": \"Mazda\", \"model\": \"MX-5\", \"mileage\": 49000, \"fuel_comsumed\": 900},\n {\"make\": \"Mini\", \"model\": \"Cooper\", \"mileage\": 31000, \"fuel_comsumed\": 235}\n]\n\n\ndef calculate_mpg(car, intro):\n print(intro)\n mpg = car['mileage'] / 
car['fuel_comsumed']\n    name = f\"{car['make']} {car['model']}\"\n    print(f\"{name} does {mpg} miles per gallon.\")\n\n\nfor car in cars:\n    calculate_mpg(car, \"New Car\")\n","repo_name":"kienonline19/complete-python-course","sub_path":"python-course-content/arguments-and-parameters/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"39042564364","text":"# coding:utf-8\n\n#importando bibliotecas\nfrom flask_restful import Resource, reqparse\nfrom flask_jwt_extended import create_access_token, jwt_required\nimport mysql.connector\nimport config\n\n#Endpoint de login\nclass Login(Resource):\n\n    #Funcao chamada que chama as demais funcoes\n    def post(self):\n        argumentos = reqparse.RequestParser()\n        argumentos.add_argument(\"funcao\", type=str, required=True, help=\"O campo 'funcao' deve ser informado!\")\n        argumentos.add_argument(\"login\", type = dict, required = True, help= \"O campo 'login' deve ser informado!\")\n        argumentos.add_argument(\"parametros\", type = dict, required = True, help= \"O campo 'parametros' deve ser informado!\")\n        args = argumentos.parse_args()\n        chamar_funcao = \"Login.\"+ \"{}(self,{},{})\".format(args['funcao'], args['login'], args['parametros'])\n        resultado = eval(chamar_funcao)\n        return resultado\n\n\n    #funcao para fazer login nas apis\n    def logar(self,login, parametros):\n\n        #conexao com o banco de dados\n        conexao = mysql.connector.connect(\n            host = config.conexao[\"host\"],\n            user= config.conexao[\"user\"],\n            password= config.conexao[\"password\"],\n            database= config.conexao[\"db\"]\n        )\n\n        #verifica se o usuario e senha informados estao certos com o id passado\n        cursor = conexao.cursor(buffered=True)\n        comando = \"SELECT * FROM sis_conexoes_api WHERE user = '{}' and senha = '{}' and id = {}\".format(login['user'],login['senha'],login['id'])\n        cursor.execute(comando)\n        r = cursor.fetchone()\n\n        conexao.close()\n\n        #caso nao encontre as credenciais\n        if r is None:\n            return {\"message\":'usuario, senha ou id incorretos'},401\n\n        #retorna token de acesso\n        else:\n            token_acesso = create_access_token(identity = r)\n            return {\"access_token\":token_acesso}, 200\n\n    def get(self):\n        return \"nnn\"","repo_name":"ramoti/py-bemtevi","sub_path":"login/logar.py","file_name":"logar.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29351481674","text":"\"\"\" Communication process implementing protocol \"\"\"\nfrom math import ceil\nimport time\nimport threading\nimport multiprocessing\nimport serial\nfrom bitstring import BitArray\nimport struct\nimport numpy as np\n\n# from data_processing_functions import create_frequency_vector\nfrom data_conversion import flatten, to_bytes, float_to_hex\nfrom data_processing_functions import *\n\nFP_SIZE = 16\nFP_DATA_BYTES = ceil(FP_SIZE/8)\nPOLY_DIM = 10\nEXTRA_DIM = 5\nFREQ_DIM = 4\n\n# Struct of commands\ncmds = { \"set_led\": 0b01100001,\n         \"request_amplitude\": 0b01100010,\n         \"param_frequencies\": 0b01100011,\n         \"param_polynomial_features\": 0b01100100,\n         \"param_extra_feature\": 0b01100101,\n         \"param_magnitude_weights\": 0b01100110,\n         \"param_phase_weights\": 0b01100111,\n         \"param_phasor_magnitude\": 0b01101000,\n         \"param_phasor_phase\": 0b01101001,\n         \"param_model_id\": 0b01101010,\n         \"update_model\" : 0b01101011  # fixed: was 0b01101001, colliding with param_phasor_phase; the protocol docstring assigns 0b01101011 ('k') to Update Model\n         }\n\nclass Communication:\n    # 2 bytes\n    input_buffer_size = 2*8\n    
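# retry budget used by the startup handshake below (transmit and receive attempts) -- descriptive comment added\n    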
n_transmit_attempts = 10\n n_receive_attempts = 5\n\n start_byte = bytes([255])\n \n\n def __init__(self,\n baud_rate, comport, # Serial\n mcu_ready, mcu_not_ready, # mcu status flags\n update_fpga_rx) -> None: # pipe, receiveing from system control\n \n self.baud_rate = baud_rate\n self.comport = comport\n self.mcu_ready = mcu_ready\n self.mcu_not_ready = mcu_not_ready\n self.update_fpga_rx = update_fpga_rx\n\n def serial_begin(self):\n \"\"\" Create serial object and start serial com\"\"\"\n # Construct serial object\n try:\n self.mcu_serial = serial.Serial(self.comport, self.baud_rate)\n except :\n print(\"NO PORT\")\n self.mcu_not_ready.set()\n \n # close and open (start)\n self.mcu_serial.close()\n self.mcu_serial.open()\n\n def handshake(self):\n \"\"\" Performs handshake at startup, if it fails exit program\"\"\"\n # Initialize receive byte, prevent reference before assignment\n receive_byte = bytes([0])\n\n # Skip if start up byte received\n if not self.mcu_ready.is_set():\n # Send start byte\n \n for transmit_attempt in range(Communication.n_transmit_attempts):\n # n attempts to transmit\n self.mcu_serial.write(Communication.start_byte)\n print(\"start byte attempt\", transmit_attempt)\n\n for receive_attempt in range(Communication.n_receive_attempts):\n # n attempts to receive\n if self.mcu_serial.in_waiting>0:\n receive_byte = self.mcu_serial.read(1)\n print(\"receive attempt\", receive_attempt)\n time.sleep(0.1)\n\n # If received break\n if receive_byte == bytes([125]):\n # Set mcu ready flag\n print(\"RECEIVED\")\n self.mcu_ready.set()\n break\n\n # Break out first loop\n if self.mcu_ready.is_set():\n print(\"Received start byte\")\n break\n\n # If not received break out program\n if not self.mcu_ready.is_set():\n self.mcu_not_ready.set()\n\n def led_cmd(self):\n \"\"\" when \"01100001\" => -- CMD: RX_LED [ascii: a] --> on/off \"\"\"\n # Toggle\n cmd = 97\n self.mcu_serial.write(bytes([cmd]))\n\n def amplitude(self): # Not used!\n \"\"\" when \"01100010\" => -- CMD: TX_AMPLITUDE [ascii: b] -->\"\"\"\n cmd = 98#\"b\"\n self.mcu_serial.write(bytes([cmd])) #cmd.encode(\"utf-8\")\n\n\n\n def frequencies(self, frequencies): # 4 Frequencies --> phase increase per sample (NOT NORMALIZED)\n \"\"\" when \"01100011\" => -- CMD: RX frequencies [ascii: c] --> 4 bytes per frequency (tot 12 bytes)\"\"\"\n cmd = 99 #\"c\"\n self.mcu_serial.write(bytes([cmd]))\n \n phase_incr = []\n for frequency in frequencies:\n phase_incr.append((frequency*2**32)/(100e6))\n\n raw_data = [float_to_hex(a, FP_SIZE) for a in flatten(phase_incr)]\n byte_array = bytes([cmds['param_frequencies']] + flatten([to_bytes(i, 2) for i in raw_data]))\n self.mcu_serial.write(byte_array)\n\n # for frequency in frequencies:\n # # Send frequency as 4 bytes (float --> 4 bytes)\n # frequency_bytes = struct.pack('f', frequency)\n # self.mcu_serial.write(frequency_bytes)\n \n def polynomial_features(self, frequencies, bandwidth): # [1, f , f^2 ....] NORMALIZED\n \"\"\" when \"01100100\" => -- CMD: RX polynomial_features [ascii: d] --> \"\"\"\n # TEST cmd = 100 #\"d\"\n # Normalize frequencies and create vector [[1,f_norm, f_norm^2 ... 
f_norm^7], [...],[...],[..]]\n frequencies_vector_normalized = []\n for frequency in frequencies:\n frequencies_vector_normalized.append(create_feature_vector_normalized(frequency,bandwidth))\n \n\n raw_data = [float_to_hex(a, FP_SIZE) for a in flatten(frequencies_vector_normalized)]\n byte_array = bytes([cmds['param_polynomial_features']] + flatten([to_bytes(i, 2) for i in raw_data]))\n \n self.mcu_serial.write(byte_array)\n\n def extra_feature(self, power):\n \"\"\" when \"01100101\" => -- CMD: RX extra_feature [ascii: e] --> 2 bytes \"\"\"\n cmd = 101 #\"e\"\n\n self.mcu_serial.write(bytes([cmd])) # Send message typpe\n self.mcu_serial.write(bytes([power])) # Send content\n\n # TODO: cast to 2 bytes per weight\n def magnitude_weights(self, weights):\n \"\"\" when \"01100110\" => -- CMD: RX magnitude_weights [ascii: f] --> 50 weights 2bytes per weight\"\"\"\n cmd = 102 #\"f\"\n self.mcu_serial.write(bytes([cmd]))\n self.mcu_serial.write(bytes(weights))\n\n def phasor_magnitude(self, magnitudes, ):\n \"\"\" when \"01101000\" => -- CMD: RX phasor_magnitude [ascii: h] --> 2 bytes per magnitude (tot 6) \"\"\"\n # cmd = 104 #\"h\"\n # Calculate X_hat (magnitude that is on the ultrasound input)\n X_hat_list = []\n for i, frequency_vector in enumerate(frequencies_vector_normalized):\n # multiply each element of power to the frequencies array\n print(\"FREQ vector\", frequency_vector)\n product_list = [[power_element*frequency_element for frequency_element in frequency_vector]for power_element in power_vector_normalized]\n flattened_product_list = list(itertools.chain(*product_list))\n # print(\"PRODUCT\", product_list)\n # print(\"PRODUCT FLATTEND \", flattened_product_list)\n # for j, power_element in enumerate(power_vector_normalized):\n # product_list.append(power_element*frequency_vector)\n\n # weights*product list (as dot operation) should result in scalar\n product_arr = np.array(flattened_product_list)\n # print(\"SIZES\", product_arr.shape, np.shape(product_list), product_list)\n abs_H = np.dot(current_model_params, product_arr)\n\n X_hat_list.append(amplitudes[i]/abs_H)\n self.mcu_serial.write(bytes([cmd]))\n self.mcu_serial.write(bytes(magnitudes)) # Send list of amplitudes as packets of bytes (total 6 bytes)\n\n def model_id(self, id):\n \"\"\" when \"01101010\" => -- CMD: RX model_id [ascii: j] --> 2 bytes \"\"\"\n cmd = 106 # \"j\"\n raw_data = [float_to_hex(a, FP_SIZE) for a in flatten(id)]\n byte_array = bytes([cmds[\"param_model_id\"]] + flatten([to_bytes(i, 2) for i in raw_data]))\n\n # id_bytes = struct.pack('h', int(id*100))\n # self.mcu_serial.write(bytes([cmd]))\n # self.mcu_serial.write(id_bytes)\n\n def update_model(self):\n \"\"\" when \"01101011\" => -- CMD: Update Model [ascii: k] \"\"\"\n # cmd = 105 # \"k\"\n self.mcu_serial.write(bytes([cmds['update_model']]))\n\n # TODO: Communication with controller\n def calibrate_quadrature_point(self):\n \"\"\" Calibrate quadrature point \"\"\"\n cmd = 103\n # Send q-point start command\n self.mcu_serial.write([cmd])\n\n # Wait for response\n # while\n # \n\ndef handshake_thread_target(mcu_serial, mcu_not_ready, mcu_ready):\n \"\"\" Performs handshake at startup, if it fails exit program\"\"\"\n global input_buffer_size\n # Skip if start up byte received\n if not mcu_ready.is_set():\n # Send start byte\n n_transmit_attempts = 10\n n_receive_attempts = 5\n start_byte = bytes([255])\n receive_byte = 0\n for transmit_attempt in range(n_transmit_attempts):\n # n attempts to transmit\n mcu_serial.write(start_byte)\n print(\"start 
byte attempt\", transmit_attempt)\n\n for receive_attempt in range(n_receive_attempts):\n # n attempts to receive\n if mcu_serial.in_waiting>0:\n receive_byte = mcu_serial.read(1)\n print(\"receive attempt\", receive_attempt)\n time.sleep(0.1)\n\n # If received break\n if receive_byte == bytes([125]):\n # Set mcu ready flag\n print(\"RECEIVED\")\n mcu_ready.set()\n break\n\n # Break out first loop\n if mcu_ready.is_set():\n print(\"Received start byte\")\n break\n\n # If not received break out program\n if not mcu_ready.is_set():\n mcu_not_ready.set()\n\ndef communication_thread_target(mcu_serial, mcu_ready):\n \"\"\" Implementation of transmit pattern:\n Each command contains 1 command byte followed by content, stop message with update model CMD\n when \"01100001\" => -- CMD: RX_LED [ascii: a] --> on/off\n when \"01100010\" => -- CMD: TX_AMPLITUDE [ascii: b] -->\n when \"01100011\" => -- CMD: RX frequencies [ascii: c] --> 4 bytes per frequency (tot 12 bytes)\n when \"01100100\" => -- CMD: RX polynomial_features [ascii: d] --> \n when \"01100101\" => -- CMD: RX extra_feature [ascii: e] --> 2 bytes\n when \"01100110\" => -- CMD: RX magnitude_weights [ascii: f] --> 50 weights 2bytes per weight\n when \"01100111\" => -- CMD: RX phase_weights [ascii: g] NOT USED\n when \"01101000\" => -- CMD: RX phasor_magnitude [ascii: h] --> 2 bytes per magnitude (tot 6)\n when \"01101001\" => -- CMD: RX phasor_phase [ascii: i] NOT USED\n when \"01101010\" => -- CMD: RX model_id [ascii: j] --> 2 bytes\n when \"01101011\" => -- CMD: Update Model [ascii: k]\n \n \"\"\"\n # RX_LED = \n mcu_ready.wait()\n RX_LED = ord('a')\n RX_PHASOR_MAGNITUDE = ord('h')\n MODEL_ID = ord('j')\n UPDATE_MODEL = ord('k')\n\n while True:\n print(\"A\",RX_LED )\n time.sleep(1)\n # RX_LED\n mcu_serial.write(RX_LED)\n # TX_AMPLITUDE\n\n # RX frequencies\n\n # RX polynomial_features\n\n # RX extra_feature\n\n # RX magnitude_weights\n\n # RX phasor_magnitude\n mcu_serial.write(RX_PHASOR_MAGNITUDE)\n # RX model_id\n mcu_serial.write(MODEL_ID)\n # Update Model\n mcu_serial.write(UPDATE_MODEL)\n time.sleep(1)\n mcu_serial.write(RX_LED)\n\n\ndef communication_process_target(port, baud_rate, mcu_not_ready, mcu_ready,\n update_fpga_rx, bandwidth):\n# Create communication\n mcu_communication = Communication(baud_rate,port,mcu_ready,mcu_not_ready, update_fpga_rx)\n\n# Begin communication\n mcu_communication.serial_begin()\n\n# Handshake\n mcu_communication.handshake()\n\n # Create threads\n # # Startup handshake\n # handshake_thread = threading.Thread(\n # target=handshake_thread_target ,\n # args=(mcu_serial,mcu_not_ready, mcu_ready)\n # )\n # Communication handler\n # communication_thread = threading.Thread(\n # target=communication_thread_target,\n # args=(mcu_serial,mcu_ready)\n # )\n\n# Start threads\n # handshake_thread.start()\n # communication_thread.start()\n\n while True:\n\n # If there is content in the pipe\n if update_fpga_rx.poll():\n content_to_send = update_fpga_rx.recv()\n print(\"UPDATE FPGA\")\n # GEt id\n # TODO: Why 1:7\n model_id = content_to_send[0]\n # Get frequencies\n frequencies = content_to_send[1:7]\n # Get model params\n model_params = content_to_send[7:12]\n # Get amplitudes\n amplitudes = content_to_send[12:]\n\n # Test poly features \n polynomial_features = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [2, 2, 2, 2, 2, 2, 2, 2, 2, 2],\n [3, 3, 3, 3, 3, 3, 3, 3, 3, 3]]\n # Send ID\n # mcu_communication.led_cmd()\n mcu_communication.polynomial_features(polynomial_features)\n # mcu_communication.model_id(model_id)\n # 
mcu_communication.frequencies(frequencies)\n # mcu_communication.magnitude_weights(model_params)\n # mcu_communication.phasor_magnitude(amplitudes)\n\n\n # # Send phasor magnitude\n # mcu_communication.phasor_magnitude(frequencies,model_params,amplitudes)\n # Send frequencies \n \n # # Update\n mcu_communication.update_model()","repo_name":"Cinbarker/FPGA_BAP","sub_path":"communication_process_FROM_JP.py","file_name":"communication_process_FROM_JP.py","file_ext":"py","file_size_in_byte":13611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32755706361","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport pandas as pd\nimport plotly.graph_objs as go\nfrom tabs import tab_functions as tf\nimport flask\n\ndf = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/gapminder2007.csv')\ncontinent = df['continent'].unique()\ncontinent = sorted(continent,reverse=False)\n\ntab_1_layout = html.Div(\n id=\"app-container\",\n children=[\n html.Br(),\n tf.single_value_dropdown(\"single-dropdown-container\",\"continent-dropdown\",\"Continent Selection\",continent),\n tf.multi_value_dropdown('multi-dropdown',\"Continent selections\",continent),\n dcc.Interval(id=\"interval-component\",\n interval=10*1000,\n n_intervals=50,\n disabled=True),\n html.Div(\n id=\"graph-container\",\n children=[\n #html.Div(id=\"dropdown-content\"), used as a debugger, uncomment in callback_functions.py\n html.Br(),\n tf.generate_banner(\"Graph Section\"),\n dcc.Graph(id=\"graph-chart\")]),])\n","repo_name":"HumzaA94/random_projects","sub_path":"multi_tab_dashboard_template/tabs/dropdown_tab.py","file_name":"dropdown_tab.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5992613734","text":"from django.db.models import Q\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.contrib import messages\n\nfrom .models import Project, Tag\n\n\ndef search_projects(request):\n search_query = \"\"\n project_list = None\n\n if request.GET.get('search_field'):\n search_query = request.GET.get('search_field')\n\n tag_list = Tag.objects.filter(\n Q(name__icontains=search_query)\n )\n\n project_list = Project.objects.distinct().filter(\n Q(title__icontains=search_query) |\n Q(description__icontains=search_query) |\n Q(owner__name__icontains=search_query) |\n Q(tags__in=tag_list)\n )\n\n else:\n project_list = Project.objects.all()\n\n return project_list, search_query\n\n\ndef paginate_projects(request, projects):\n results = 3\n page = 1\n projects = list(projects)\n paginator = Paginator(projects, results)\n\n try:\n page = request.GET.get('page', page)\n\n project_list = paginator.page(page)\n\n except EmptyPage:\n messages.error(request, \"That page does not exist\")\n page = 1\n project_list = paginator.page(page)\n\n except PageNotAnInteger:\n messages.error(request, \"That page is invalid.\")\n page = 1\n project_list = paginator.page(page)\n\n left_index = (int(page) - 4)\n\n if left_index < 1:\n left_index = 1\n\n right_index = (int(page) + 5)\n\n if right_index > paginator.num_pages:\n right_index = paginator.num_pages + 1\n\n custom_range = range(left_index, right_index)\n\n return project_list, 
custom_range\n","repo_name":"humstack/devsearch","sub_path":"projects/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30798142159","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\n#chrome 要改成94版本,不然會跑不動\nfrom time import sleep\nfrom selenium import webdriver\n\n#記得上網下載chromedriver\nimport os\npath = \"/Users/linyuci/Downloads/chromedriver 3\"\n\nprint(path)\n\n\ndriver = webdriver.Chrome(path)\ndriver.get(\"https://popcat.click/\")\na=driver.find_element_by_xpath('//*[@id=\"app\"]/div')\nwhile True:\n a.click()\n sleep(0.055)\n\n","repo_name":"ritalinyutzu/POP_CAT","sub_path":"pop_cat.py","file_name":"pop_cat.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"3921214204","text":"a,b,c=map(int, input().split())\r\n\r\ndef d(a,b):\r\n if b%2:\r\n return d(a,b-1)*a\r\n elif b==0:\r\n return 1\r\n elif b==1:\r\n return a\r\n else:\r\n x=d(a,b//2)\r\n return x*x%c\r\n\r\nprint(d(a,b)%c)","repo_name":"2021-01-06/baekjoon","sub_path":"1629.py","file_name":"1629.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21751403068","text":"# coding=utf-8\n# Функция поиска подстроки в строке\n# НЕ допускать к регистрации мейлы с определенными символами, словами или их комбинациями\n# Вспомогательная функция\n\ndef check_symbols(restricted_symbols, s): #Опредление функции проверки символов\n for symb in restricted_symbols:\n if symb in s:\n return True\n return False\n\n","repo_name":"vitalyvels/Skillbox","sub_path":"Lesson_1/Helper03Functions.py","file_name":"Helper03Functions.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8180342307","text":"import discord, asyncio, json, requests, typing\nfrom discord import ext\nfrom discord.ext import commands\nfrom discord.ext.commands import (\n Bot,\n bot_has_permissions,\n MissingPermissions,\n has_permissions,\n)\nimport emojis\n\n\nclass ReactionRolesNotSetup(commands.CommandError):\n \"\"\"reaction roles not setup for the current guild\"\"\"\n\n pass\n\n\ndef is_setup():\n async def wrap_func(ctx):\n data = await ctx.bot.config.find(ctx.guild.id)\n if data is None:\n raise ReactionRolesNotSetup\n\n if data.get(\"message_id\") is None:\n raise ReactionRolesNotSetup\n\n return True\n\n return commands.check(wrap_func)\n\n\nclass Reactions(commands.Cog, name=\"ReactionRoles\"):\n def __init__(self, bot):\n self.bot = bot\n\n async def rebuild_role_embed(self, guild_id):\n data = await self.bot.config.find(guild_id)\n channel_id = data[\"channel_id\"]\n message_id = data[\"message_id\"]\n\n guild = await self.bot.fetch_guild(guild_id)\n channel = await self.bot.fetch_channel(channel_id)\n message = await channel.fetch_message(message_id)\n\n embed = discord.Embed(title=\"Reaction Roles!\")\n await message.clear_reactions()\n\n desc = \"\"\n reaction_roles = await self.bot.reaction_roles.get_all()\n reaction_roles = list(\n filter(lambda r: r[\"guild_id\"] == guild_id, reaction_roles)\n )\n\n for item in reaction_roles:\n role = guild.get_role(item[\"role\"])\n desc += f\"{item['_id']}: {role.mention}\\n\"\n await message.add_reaction(item[\"_id\"])\n\n embed.description = desc\n await message.edit(embed=embed)\n\n 
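# added comment: return the emoji keys currently registered as reaction roles for this guild\n    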
async def get_current_reactions(self, guild_id):\n data = await self.bot.reaction_roles.get_all()\n data = filter(lambda r: r[\"guild_id\"] == guild_id, data)\n data = map(lambda r: r[\"_id\"], data)\n return list(data)\n\n @commands.group(aliases=[\"rr\"], invoke_without_command=True)\n @commands.guild_only()\n async def reactionroles(self, ctx):\n await ctx.invoke(self.bot.get_command(name=\"help\"), entity=\"reactionroles\")\n\n @reactionroles.command(name=\"channel\")\n @commands.guild_only()\n @commands.has_guild_permissions(manage_channels=True)\n async def rr_channel(self, ctx, channel: discord.TextChannel = None):\n if channel is None:\n await ctx.send(\"No channel given. Using current channel...\")\n channel = channel or ctx.channel\n try:\n await channel.send(\n \"Testing if I can send messages here.\", delete_after=0.05\n )\n except discord.HTTPException:\n await ctx.send(\n \"I cannot send messages to that channel! Try again after giving me permissions.\"\n )\n return\n\n embed = discord.Embed(title=\"Reaction Roles!\")\n\n desc = \"\"\n reaction_roles = await self.bot.reaction_roles.get_all()\n reaction_roles = list(\n filter(lambda r: r[\"guild_id\"] == ctx.guild.id, reaction_roles)\n )\n for item in reaction_roles:\n role = ctx.guild.get_role(item[\"role\"])\n desc += f\"{item['_id']}: {role.mention}\\n\"\n embed.description = desc\n\n m = await ctx.send(embed=embed)\n for item in reaction_roles:\n await m.add_reaction(item[\"_id\"])\n\n await self.bot.config.upsert(\n {\n \"_id\": ctx.guild.id,\n \"message_id\": m.id,\n \"channel_id\": m.channel.id,\n \"is_enabled\": True,\n }\n )\n await ctx.send(\"Should be all set up for you now!\")\n\n @reactionroles.command(name=\"toggle\")\n @commands.guild_only()\n @commands.has_guild_permissions(administrator=True)\n @is_setup()\n async def rr_toggle(self, ctx):\n data = await self.bot.config.find(ctx.guild.id)\n data[\"is_enabled\"] = not data[\"is_enabled\"]\n await self.bot.config.upsert(data)\n\n is_enabled = \"enabled.\" if data[\"is_enabled\"] else \"disabled.\"\n await ctx.send(f\"I've toggled that for you! 
It's currently {is_enabled}\")\n\n @reactionroles.command(name=\"add\")\n @commands.guild_only()\n @commands.has_guild_permissions(administrator=True)\n @is_setup()\n async def rr_add(\n self, ctx, emoji: typing.Union[discord.Emoji, str], *, role: discord.Role\n ):\n reacts = await self.get_current_reactions(ctx.guild.id)\n if len(reacts) >= 20:\n await ctx.send(\n \"This does not support more than 20 Reaction roles per guild!\"\n )\n\n # if not isinstance(emoji, discord.Emoji):\n # \temoji = emojis.get(emoji)\n # \temoji = emoji.pop()\n\n if isinstance(emoji, discord.Emoji):\n if not emoji.is_usable():\n await ctx.send(\"I cannot use this emoji!\")\n return\n\n emoji = str(emoji)\n await self.bot.reaction_roles.upsert(\n {\"_id\": emoji, \"role\": role.id, \"guild_id\": ctx.guild.id}\n )\n\n await self.rebuild_role_embed(ctx.guild.id)\n await ctx.send(\"This is added and good to go!\")\n\n @reactionroles.command(name=\"remove\")\n @commands.guild_only()\n @commands.has_guild_permissions(administrator=True)\n @is_setup()\n async def rr_remove(self, ctx, emoji: typing.Union[discord.Emoji, str]):\n if not isinstance(emoji, discord.Emoji):\n emoji = emojis.get(emoji)\n emoji = emoji.pop()\n\n emoji = str(emoji)\n await self.bot.reaction_roles.delete(emoji)\n await self.rebuild_role_embed(ctx.guild.id)\n await ctx.send(\"That should have been removed for you!\")\n\n @commands.Cog.listener()\n async def on_raw_reaction_add(self, payload):\n data = await self.bot.config.find(payload.guild_id)\n\n if not payload.guild_id or not data or not data.get(\"is_enabled\"):\n return\n\n guild_reaction_roles = await self.get_current_reactions(payload.guild_id)\n if str(payload.emoji) not in guild_reaction_roles:\n return\n\n guild = await self.bot.fetch_guild(payload.guild_id)\n\n emoji_data = await self.bot.reaction_roles.find(str(payload.emoji))\n role = guild.get_role(emoji_data[\"role\"])\n\n member = await guild.fetch_member(payload.user_id)\n\n if role not in member.roles:\n await member.add_roles(role, reason=\"Reaction role.\")\n\n @commands.Cog.listener()\n async def on_raw_reaction_remove(self, payload):\n data = await self.bot.config.find(payload.guild_id)\n\n if not payload.guild_id or not data or not data.get(\"is_enabled\"):\n return\n\n guild_reaction_roles = await self.get_current_reactions(payload.guild_id)\n if str(payload.emoji) not in guild_reaction_roles:\n return\n\n guild = await self.bot.fetch_guild(payload.guild_id)\n\n emoji_data = await self.bot.reaction_roles.find(str(payload.emoji))\n role = guild.get_role(emoji_data[\"role\"])\n\n member = await guild.fetch_member(payload.user_id)\n\n if role in member.roles:\n await member.remove_roles(role, reason=\"Reaction role.\")\n\n\ndef setup(bot):\n bot.add_cog(Reactions(bot))\n","repo_name":"nosnowowie93347/Terrabot","sub_path":"cogs/reaction.py","file_name":"reaction.py","file_ext":"py","file_size_in_byte":7251,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"11962662715","text":"# Vanessa Dunford\n# Github: https://github.com/vanicci\n# Linkedin: https://www.linkedin.com/in/vanessa-dunford-08ab7663/\n# Youtube: http://bit.ly/JoinMeOnYouTube\n# Twitter: https://twitter.com/vaniccilondon\n\n# Table of contents. 
Here’s something for you to do in order to play around more with center, ljust, and rjust: write a program that will display a table of contents so that it looks like this:\n# Chapter 1: Getting Started page 1 Chapter 2: Numbers page 9 Chapter 3: Letters page 13\n\nmylist = [[\"Chapter 1\", \"Getting Started\", 1],\n\t\t\t[\"Chapter 2\", \"Numbers\", 9],\n\t\t\t[\"Chapter 3\", \"Letters\", 13],]\n\nprint(\": Chapter of the Book : Subject Title : Page Number :\")\n\nfor item in mylist:\n\tprint(\":\", item[0], \" \"*(18-len(item[0])),\":\",\n\t\titem[1], \" \"*(16-len(item[1])),\":\",\n\t\titem[2], \" \"*(10-len(str(item[2]))),\":\")\n","repo_name":"vdunford/01-Intro-to-Python-14-Tasks","sub_path":"05-TableOfContents.py","file_name":"05-TableOfContents.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30173031800","text":"import torch\nfrom transformers import T5ForConditionalGeneration, T5Tokenizer, Trainer, TrainingArguments\nfrom typing import List, Dict\n\n\nclass ChatbotT5:\n def __init__(self, model_path: str = None):\n self.tokenizer = T5Tokenizer.from_pretrained('t5-base')\n if model_path is None:\n self.model = T5ForConditionalGeneration.from_pretrained('t5-base')\n else:\n self.model = T5ForConditionalGeneration.from_pretrained(model_path)\n\n def prepare_data(self, dataset: List[Dict[str, str]], max_length: int = 128):\n input_texts = [d['question'] for d in dataset]\n output_texts = [d['response'] for d in dataset]\n\n inputs = self.tokenizer.batch_encode_plus(\n input_texts,\n padding=True,\n max_length=max_length,\n truncation=True,\n return_tensors='pt'\n )\n\n outputs = self.tokenizer.batch_encode_plus(\n output_texts,\n padding=True,\n max_length=max_length,\n truncation=True,\n return_tensors='pt'\n )\n\n input_ids = inputs['input_ids']\n input_attention_mask = inputs['attention_mask']\n output_ids = outputs['input_ids']\n output_attention_mask = outputs['attention_mask']\n\n data = []\n for i in range(len(dataset)):\n data.append({\n 'input_ids': input_ids[i],\n 'attention_mask': input_attention_mask[i],\n 'labels': output_ids[i],\n 'decoder_attention_mask': output_attention_mask[i],\n })\n\n return data\n\n def fine_tune(self, train_dataset, eval_dataset, model_dir: str = 'model',\n num_train_epochs: int = 30, batch_size: int = 8):\n training_args = TrainingArguments(\n output_dir=model_dir,\n num_train_epochs=num_train_epochs,\n per_device_train_batch_size=batch_size,\n per_device_eval_batch_size=batch_size,\n logging_dir=model_dir+'/logs',\n logging_steps=10,\n evaluation_strategy='epoch',\n save_strategy='epoch',\n save_total_limit=3,\n eval_steps=10,\n load_best_model_at_end=True,\n )\n\n trainer = Trainer(\n model=self.model,\n args=training_args,\n train_dataset=train_dataset,\n eval_dataset=eval_dataset\n )\n\n trainer.train()\n\n self.model.save_pretrained(model_dir)\n\n def generate_response(self, input_text: str, max_length: int = 4016):\n input_ids = self.tokenizer.encode(\n input_text,\n padding=True,\n max_length=max_length,\n truncation=True,\n return_tensors='pt'\n )\n\n output_ids = self.model.generate(\n input_ids=input_ids,\n max_length=max_length,\n num_beams=4,\n early_stopping=True,\n do_sample=True,\n top_k=50,\n top_p=0.95,\n temperature=0.7,\n )\n\n response = self.tokenizer.decode(output_ids[0], skip_special_tokens=True)\n\n return 
response","repo_name":"MuhammadHamzaAhmed/Chatbot","sub_path":"chatbot/adam/training/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70533324644","text":"# bus app v2.0\n# june 2019 - by anthony@bitsandatoms.net\n\nimport logging\nimport os\nimport threading\nimport time\n\nfrom flask import send_from_directory\nfrom flask import Flask, render_template, request\nfrom flask_bootstrap import Bootstrap\nfrom flask import jsonify\nfrom flask_cors import CORS, cross_origin\n\nfrom lib import API\nfrom lib import NJTransitAPI\nfrom lib import wwwAPI\nfrom lib.TransitSystem import load_system_map\n\n################################################\n# VIP INSTANCE CONFIG\n################################################\nsource_global='nj'\nclass Dummy():\n def __init__(self):\n self.routename = 'Jersey City'\n\n################################################\n# APP\n################################################\napp = Flask(__name__, static_url_path='/static')\nCORS(app, support_credentials=True)\n\n################################################\n# BOOTSTRAP\n################################################\napp.config.update(\n BOOTSTRAP_CDN_FORCE_SSL=True\n)\nBootstrap(app)\n\n\n# ################################################\n# # CACHE SETUP (unused)\n# ################################################\n# from flask_caching import Cache\n# cache = Cache(app,config={'CACHE_TYPE': 'simple'})\n\n\n################################################\n# LOGGING\n# per https://medium.com/@trstringer/logging-flask-and-gunicorn-the-manageable-way-2e6f0b8beb2f\n################################################\nif __name__ != \"__main__\":\n gunicorn_logger = logging.getLogger(\"gunicorn.error\")\n app.logger.handlers = gunicorn_logger.handlers\n app.logger.setLevel(gunicorn_logger.level)\n\n\n################################################\n# STATIC ASSETS\n################################################\nfrom flask_assets import Bundle, Environment\nbundles = {\n 'route_css': Bundle(\n 'css/theme.css',\n 'css/theme.scss',\n output='gen/route.css'),\n}\nassets = Environment(app)\nassets.register(bundles)\n\n\n################################################\n# SYSTEM_MAP RELOADER\n################################################\n\n@app.before_first_request\ndef activate_job():\n def run_job():\n while True:\n print(\"app.py recurring task every 10 mins...\")\n system_map=load_system_map()\n time.sleep(600)\n\n thread = threading.Thread(target=run_job)\n thread.start()\n\n\n\n\n################################################\n# URLS\n################################################\n\n@app.route('/')\ndef displayIndex():\n\n vehicle_data = NJTransitAPI.parse_xml_getBusesForRouteAll(NJTransitAPI.get_xml_data('nj','all_buses'))\n vehicle_count = len(vehicle_data)\n route_count = len(list(set([v.rt for v in vehicle_data])))\n\n # vehicle_data, vehicle_count, route_count = API.current_buspositions_from_db_for_index()\n\n routereport = Dummy() # setup a dummy routereport for the navbar\n return render_template('index.jinja2',\n collection_descriptions=system_map.collection_descriptions,\n routereport=routereport,\n vehicle_count=vehicle_count,\n route_count=route_count)\n\n@app.route('/')\ndef displayCollection(collection_url):\n vehicles_now = API.get_positions_byargs(system_map,\n {'collection': collection_url, 'layer': 'vehicles'},\n 
system_map.route_descriptions,\n system_map.collection_descriptions)\n collection_description=system_map.collection_descriptions[collection_url]\n collection_description['number_of_active_vehicles'] = len(vehicles_now['features'])\n collection_description['number_of_active_routes'] = len(system_map.collection_descriptions[collection_url]['routelist'])\n route_report = Dummy() # setup a dummy routereport for the navbar\n return render_template('collection.jinja2',\n collection_url=collection_url,\n grade_roster=system_map.grade_roster,\n collection_description=collection_description,\n route_descriptions=system_map.route_descriptions,\n period_descriptions=system_map.period_descriptions,\n routereport=route_report)\n\n@app.route('//route//')\ndef genRouteReport(collection_url,route, period):\n route_report = wwwAPI.RouteReport(system_map, route, period)\n return render_template('route.jinja2',\n collection_url=collection_url,\n collection_descriptions=system_map.collection_descriptions,\n route=route,\n period=period,\n period_descriptions=system_map.period_descriptions,\n routereport=route_report)\n\n@app.route('//route//stop//')\ndef genStopReport(collection_url, route, stop, period):\n stop_report = wwwAPI.StopReport(system_map, route, stop, period)\n route_report = wwwAPI.RouteReport(system_map, route, period)\n predictions = NJTransitAPI.parse_xml_getStopPredictions(NJTransitAPI.get_xml_data('nj', 'stop_predictions', stop=stop, route='all'))\n return render_template('stop.jinja2',\n collection_url=collection_url,\n collection_descriptions=system_map.collection_descriptions,\n period_descriptions=system_map.period_descriptions,\n stop=stop, period=period,\n stopreport=stop_report,\n reportcard_routes=system_map.route_descriptions,\n predictions=predictions,\n routereport=route_report)\n\n\n@app.route('//route//trip/')\ndef genTripReport(collection_url, route, trip_id):\n trip_report = wwwAPI.TripReport(system_map, route,trip_id)\n route_report = wwwAPI.RouteReport(system_map, route, 'day')\n return render_template('trip.jinja2',\n collection_url=collection_url,\n collection_descriptions=system_map.collection_descriptions,\n period_descriptions=system_map.period_descriptions,\n trip_id=trip_id,\n reportcard_routes=system_map.route_descriptions,\n routereport=route_report,\n trip_report=trip_report)\n\n@app.route('/about')\ndef displayFAQ():\n routereport = Dummy() # setup a dummy routereport for the navbar\n return render_template('about.jinja2',\n route_definitions=system_map.route_descriptions,\n routereport=routereport)\n\n\n@app.route('/favicon.ico')\ndef favicon():\n return send_from_directory(os.path.join(app.root_path, 'static/images'),'favicon.ico',mimetype='image/vnd.microsoft.icon')\n\n\n################################################\n# API\n# map layer geojson generator\n################################################\n\n\n@app.route('/api/v1/maps/vehicles')\n@cross_origin()\ndef api_vehicles():\n args=dict(request.args)\n args['layer'] = 'vehicles'\n return jsonify(API.get_positions_byargs(\n system_map,\n args,\n system_map.route_descriptions,\n system_map.collection_descriptions\n ))\n\n@app.route('/api/v1/maps/waypoints')\n@cross_origin()\ndef api_waypoints():\n args=dict(request.args)\n args['layer'] = 'waypoints'\n return jsonify(system_map.render_geojson(args))\n\n@app.route('/api/v1/maps/stops')\n@cross_origin()\ndef api_stops():\n args=dict(request.args)\n args['layer'] = 'stops'\n return 
jsonify(system_map.render_geojson(args))\n\n################################################\n# ERROR HANDLER\n################################################\n@app.errorhandler(404)\ndef page_not_found(e):\n    routereport = Dummy() # setup a dummy routereport for the navbar\n    return render_template('404.jinja2', route_report=routereport), 404\n\n\n################################################\n# CUSTOM FILTERS\n################################################\n\n@app.template_filter('strftime_today')\ndef _jinja2_filter_datetime(timestamp, format='%I:%M %p'):\n    return timestamp.strftime(format)\n\n@app.template_filter('strftime_period')\ndef _jinja2_filter_datetime_by_period(timestamp, period):\n    return timestamp.strftime(system_map.period_descriptions[period]['strftime_format'])\n\n@app.template_filter('hour_as_int')\ndef _jinja2_filter_hour_as_int(hour):\n    hour = int(hour)\n    pretty_time = ''\n    if hour == 0:\n        pretty_time = (\"12 am\")\n    elif (hour > 0 and hour < 10):\n        pretty_time = (\" {a} am\").format(a=hour)\n    elif (hour == 10 or hour == 11):\n        pretty_time = (\"{a} am\").format(a=hour)\n    elif hour == 12:\n        pretty_time = (\"12 pm\")\n    elif (hour > 12 and hour < 24):\n        hour = hour - 12\n        pretty_time = (\" {a} pm\").format(a=hour)\n    elif hour > 23:\n        hour = hour - 12\n        pretty_time = (\"{a} pm\").format(a=hour)\n    return pretty_time\n\n@app.template_filter('strftime_forever')\ndef _jinja2_filter_datetime_forever(timestamp, format='%Y-%m-%d %I:%M %p'):  # renamed to avoid silently redefining _jinja2_filter_datetime above\n    return timestamp.strftime(format)\n\n@app.template_filter('title')\ndef _jinja2_filter_titlecase(name):\n    return name.title()\n\n@app.template_filter('strftime_timedelta')\ndef pretty_timedelta(td):\n    days = td.days\n    hours, remainder = divmod(td.seconds, 3600)\n    minutes, seconds = divmod(remainder, 60)\n    if days != 0:\n        pretty_time = (\"{a} days {b} hrs {c} mins\").format(a=days, b=hours, c=minutes)\n        return pretty_time\n    elif hours != 0:\n        pretty_time = (\"{a} hrs {b} mins\").format(a=hours, b=minutes)\n        return pretty_time\n    else:\n        pretty_time = (\"{a} mins\").format(a=minutes)\n        return pretty_time\n\n@app.template_filter('split_')\ndef splitpart(value, index, char='_'):\n    return value.split(char)[index]\n\n################################################\n# MAIN SCRIPT\n################################################\n\nif __name__ == \"__main__\":\n    system_map=load_system_map()\n    app.run(host='0.0.0.0', debug=True)\n\n\n# after https://medium.com/@trstringer/logging-flask-and-gunicorn-the-manageable-way-2e6f0b8beb2f\nif __name__ != \"__main__\":\n    system_map = load_system_map()\n    gunicorn_logger = logging.getLogger(\"gunicorn.error\")\n    app.logger.handlers = gunicorn_logger.handlers\n    app.logger.setLevel(gunicorn_logger.level)\n\n\n","repo_name":"anthonymobile/njbuswatcher_2020","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10623,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"27892667778","text":"import numpy as n\nfrom matplotlib import pyplot as p\nimport scipy.stats as stats\nfrom astropy.io import fits\nimport numpy.random as r\nimport scipy.optimize as opt\nfrom sklearn.mixture import GMM  # fixed: was 'import sklearn.mixture as GMM', which binds the module rather than the estimator class used below\n\ndef pdf_model(x, theta):\n    mu1, sig1, mu2, sig2, al_1 = theta\n    return al_1*stats.norm.pdf(x, mu1, sig1) + (1-al_1)*stats.norm.pdf(x, mu2, sig2)  # stats.norm.pdf replaces the undefined normpdf\n\ndef log_likelihood_two_1d_gauss(theta, sample):\n\treturn -n.log(pdf_model(sample, theta)).sum()\n\ndef main():\n\thdulist = fits.open('GC_NGC4365.fits')\n\t#hdulist.info()\n\t#data in form 
[objID,run,rerun,camcol,field,obj,type,ra,dec,u,g,r,i,z,\n\t#err_u,err_g,err_r,err_i,err_z]\n\tdata = hdulist[1].data\n\thdr = hdulist[1].header\n\t#hdr.keys()\n\t#data.columns.info()\n\thdulist.close()\n\tra = data['RAJ2000']\n\tdec = data['DEJ2000']\n\tgmag = data['gmag']\n\timag = data['imag']\n\tgi = gmag - imag\n\tp.hist(gi,bins=50)\n\t\n\t# Initial guess of parameters [mu1, sig1, mu2, sig2, a_1]\n\t# by eye there look to be at least two gaussians\n\ttheta0 = n.array([0.8,0.1,1.0,0.2,0.5])\n\t#use the minimize package of scipy optimize\n\t#there are a plethora of method options 'Nelder-Mead','Powell','CG',\n\t#'BFGS','Newton-CG','Anneal','L-BFGS-B','TNC','COBYLA','SLSQP','dogleg',\n\t#'trust-ncg'\n\t#res = opt.minimize(log_likelihood_two_1d_gauss, x0=theta0, args=(gi,), method='BFGS')\n\t#test different numbers of components\n\tmeans = list()\n\tweights = list()\n\tcovars = list()\n\taics = list()\n\tfor i in range(5):\n\t\tmodel = GMM.GMM(i+1)  # GMM class from the (older) sklearn.mixture module\n\t\tmodel.fit(gi)\n\t\tmeans.append(model.means_.flatten())\n\t\t#weights\n\t\tweights.append(model.weights_.flatten())\n\t\t#covariances\n\t\tcovars.append(model.covars_.flatten())\n\t\tM_best = model\n\t\taics.append(M_best.aic(gi))\n\t#the GCs are best fit with just 2 components\n\n\tmodel = GMM.GMM(2)\n\tmodel.fit(gi)\n\t#print the means\n\tmodel.means_.flatten()\n\t#weights\n\tmodel.weights_.flatten()\n\t#covariances\n\tmodel.covars_.flatten()\n\tM_best = model\n\tx = n.arange(.6, 1.3,.01)\n\tlogprob,respons = M_best.score_samples(x)\n\tpdf = n.exp(logprob)\n\tpdf_individual = respons * pdf[:,n.newaxis]\n\t#plot the figure\n\tfig, ax = p.subplots(1,1,figsize=(10,7))\n\tax.hist(gi,50,normed=True,histtype='stepfilled',alpha=0.4,label='data')\n\tax.plot(x,pdf,'-k',lw=3,label=\"Best fit\")\n\tax.plot(x,pdf_individual[:,0],'--',c='k',label='component 1')\n\tax.plot(x,pdf_individual[:,1],'-',c='k',label='component 2')\n\tax.legend()\n\tax.set_xlabel('x',fontsize=20)\n\t#calculate the Akaike information criterion for the model\n\tM_best.aic(gi)\n\t\n\t#get the component each object likely belongs to\t\n\tcomponent = M_best.predict(gi)\n\tra1 = ra[n.where(component==0)]\n\tra2 = ra[n.where(component==1)]\n\tdec1 = dec[n.where(component==0)]\n\tdec2 = dec[n.where(component==1)]\n\tp.plot(ra1,dec1,'b.',label='component 1')\n\tp.plot(ra2,dec2,'r.',label='component 2')\n\tp.hist(ra1,bins=50,alpha=.4,color='red')\n\tp.hist(ra2, bins=50,alpha=.4,color='blue')\n\tp.hist(dec1,bins=50,alpha=.4,color='red')\n\tp.hist(dec2, bins=50,alpha=.4,color='blue')\n\t#the GCs are distributed differently\n\nif __name__ == '__main__':\n    main()","repo_name":"arnesonr/astro_stats","sub_path":"EM_method.py","file_name":"EM_method.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30975994285","text":"# https://deeplearningcourses.com/c/machine-learning-in-python-random-forest-adaboost\n# https://www.udemy.com/machine-learning-in-python-random-forest-adaboost\nfrom __future__ import print_function, division\nfrom builtins import range, input\n# Note: you may need to update your version of future\n# sudo pip install -U future\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.utils import shuffle\nfrom util import plot_decision_boundary\n\nnp.random.seed(10)\n\n# create the data\nN = 500\nD = 2\nX = np.random.randn(N, D)\n\n# 2 gaussians\n# sep = 1.5\n# X[:N/2] += np.array([sep, sep])\n# X[N/2:] += np.array([-sep, 
-sep])\n# Y = np.array([0]*(N/2) + [1]*(N/2))\n\n# noisy XOR\nsep = 2\nX[:125] += np.array([sep, sep])\nX[125:250] += np.array([sep, -sep])\nX[250:375] += np.array([-sep, -sep])\nX[375:] += np.array([-sep, sep])\nY = np.array([0]*125 + [1]*125 + [0]*125 + [1]*125)\n\n# plot the data\nplt.scatter(X[:,0], X[:,1], s=100, c=Y, alpha=0.5)\nplt.show()\n\n# lone decision tree\nmodel = DecisionTreeClassifier()\nmodel.fit(X, Y)\nprint(\"score for 1 tree:\", model.score(X, Y))\n\n# plot data with boundary\nplt.scatter(X[:,0], X[:,1], s=100, c=Y, alpha=0.5)\nplot_decision_boundary(X, model)\nplt.show()\n\n\n# create the bagged model\nclass BaggedTreeClassifier:\n    def __init__(self, B):\n        self.B = B\n\n    def fit(self, X, Y):\n        N = len(X)\n        self.models = []\n        for b in range(self.B):\n            idx = np.random.choice(N, size=N, replace=True)\n            Xb = X[idx]\n            Yb = Y[idx]\n\n            model = DecisionTreeClassifier(max_depth=2)\n            model.fit(Xb, Yb)\n            self.models.append(model)\n\n    def predict(self, X):\n        # no need to keep a dictionary since we are doing binary classification\n        predictions = np.zeros(len(X))\n        for model in self.models:\n            predictions += model.predict(X)\n        return np.round(predictions / self.B)\n\n    def score(self, X, Y):\n        P = self.predict(X)\n        return np.mean(Y == P)\n\n\nmodel = BaggedTreeClassifier(200)\nmodel.fit(X, Y)\n\nprint(\"score for bagged model:\", model.score(X, Y))\n\n# plot data with boundary\nplt.scatter(X[:,0], X[:,1], s=100, c=Y, alpha=0.5)\nplot_decision_boundary(X, model)\nplt.show()\n\n\n\n","repo_name":"lazyprogrammer/machine_learning_examples","sub_path":"supervised_class2/bagging_classification.py","file_name":"bagging_classification.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","stars":7794,"dataset":"github-code","pt":"52"} +{"seq_id":"15159364569","text":"# import os\r\n# root_path = os.getcwd()\r\n# offset = len(root_path.split(\"\\\\\"))\r\n# for root,dirs,files in os.walk(root_path):\r\n# \tcurrent_dir=root\r\n# \tpath_list = current_dir.split(\"\\\\\")\r\n# \tindent_level = len(path_list) - offset\r\n# \t#print(\"\\t\"*indent_level,\"\\\\\"+path_list[-1])\r\n# \t#print(files)\r\n# \t#os.path.splitext\r\n# \tfor f in files:\r\n# \t# \t#print(os.path.splitext(f))\r\n# \t\tfile_name=os.path.splitext(f)[0]\r\n# \t\tfile_path=os.path.join(root,f)\r\n# \t\t#file_path = root+\"\\\\\"+file_name\r\n# \t\tprint(root)\r\n# \t\tprint(root+\"\\\\\"+file_name)\r\n# \t\tprint(\"\\t\"*(indent_level+1),file_name)\r\nfile_path = r'D:\\root\\dir1\\cp3_data_size.c' # raw string\r\n# print(file_path)\r\n\r\ndef line_count(file_path):\r\n \tcode_line,blank_line = 0,0\r\n \twith open(file_path,'r') as fp:\r\n \t\twhile True:\r\n \t\t\tline = fp.readline()\r\n \t\t\tif not line:\r\n \t\t\t\tbreak\r\n \t\t\t# count blank lines separately instead of lumping them in with code\r\n \t\t\tif line.strip():\r\n \t\t\t\tcode_line += 1\r\n \t\t\telse:\r\n \t\t\t\tblank_line += 1\r\n \tprint(code_line,\"code lines,\",blank_line,\"blank lines\")\r\nline_count(file_path)\r\n","repo_name":"bcdefu8374/11-29python","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21864289009","text":"import os\nimport pandas as pd\nimport numpy as np\nimport csv\n\nfrom itertools import cycle, product\nimport argparse\nimport warnings\n\nfrom sklearn.preprocessing import normalize\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, RepeatedStratifiedKFold, train_test_split\nfrom 
sklearn.utils import shuffle\nfrom sklearn.decomposition import NMF\nfrom sklearn.exceptions import ConvergenceWarning\n\n# import private scripts\nimport load_kmer_cnts_jf\nimport stats_utils_AEB\n\n\ngraph_dir = os.environ['HOME'] + '/deep_learning_microbiome/analysis/impt_features/kmer_lists'\n\n\n# User passes the model to be used as a command-line argument, which is parsed here.\nif __name__ == '__main__': \n parser = argparse.ArgumentParser(description= \"Program to run machine learning models and find impt feats\")\n parser.add_argument('-m', type = str, default = 'rf', help = \"Model type, can be rf lasso or lasso_nmf\")\n parser.add_argument('-k', type = int, default = 5, help = \"Kmer Size\")\n parser.add_argument('-f', type = int, default = 10, help = \"Number CV folds\")\n parser.add_argument('-r', type = int, default = 20, help = \"Number iterations of k-fold cross validation\")\n parser.add_argument('-ds', type = str, default = 'LiverCirrhosis', help = 'Data set')\n\n arg_vals = parser.parse_args()\n model_type = arg_vals.m\n kmer_size = arg_vals.k\n splits = arg_vals.f\n repeats = arg_vals.r\n data_set = arg_vals.ds\n\n#Functions\ndef class_to_target(cls):\n target = np.zeros((n_classes,))\n target[class_to_ind[cls]] = 1.0\n return target\n\n \ndef config_info(dataset_name, model_name, config, kmer_size, skip_keys=['DS', 'CL']):\n config_info = \"DS:\" + dataset_name\n for k in config: \n # skip the specified keys, used for skipping the fold and iteration indices (for aggregating results across them)\n if not k in skip_keys:\n config_info += '_' + k + ':' +str(get_config_val(k, config))\n return config_info\n\ndef get_config_val(config_key, config):\n val = config[config_key]\n if type(val) is list:\n val = '-'.join([ str(c) for c in val])\n return val\n\ndef get_reverse_complement(kmer):\n kmer_rev = ''\n for c in kmer:\n if c == 'A':\n kmer_rev += 'T'\n elif c == 'T':\n kmer_rev += 'A'\n elif c == 'C':\n kmer_rev += 'G'\n else:\n kmer_rev += 'C'\n\n return kmer_rev[::-1]\n \n\ndef get_feature_importances(clf, kmer_imps):\n print(\"GETTING FEATURE IMPORTANCES\")\n importances = clf.feature_importances_\n #std = np.std([tree.feature_importances_ for tree in clf.estimators_],\n # axis=0)\n for i in range(len(importances)):\n kmer_imps[i] += importances[i]\n print(\"FINISHED ADDING IMPORTANCES\")\n \ndef get_lasso_importances(estimator, kmer_imps):\n print(\"GETTING FEATURE IMPORTANCES\")\n importances = estimator.coef_\n for i in range(len(importances)):\n kmer_imps[i] += importances[0][i]\n print(\"FINISHED ADDING IMPORTANCES\")\n \ndef get_lasso_NMF_importances(estimator, factors):\n print(\"GETTING FEATURE IMPORTANCES\")\n importances = estimator.coef_\n for i in range(len(importances)):\n factors[i] += importances[0][i]\n print(\"FINISHED ADDING IMPORTANCES\")\n \n\n## data loading ##\ndata_set = [data_set]\nallowed_labels = ['0', '1']\nkmer_cnts, accessions, labelz, domain_labels = load_kmer_cnts_jf.load_kmers(kmer_size,\n data_set,\n allowed_labels)\nlabelz=np.asarray(labelz)\nlabelz=labelz.astype(np.int)\n\nif model_type == 'lasso_nmf':\n n=20\n data_normalized = normalize(kmer_cnts, axis = 1, norm = 'l1')\n data_normalized = stats_utils_AEB.NMF_factor(data_normalized, kmer_size, n_components = int(n), \n title=(str(data_set) + str(kmer_size) + \"mers\" \n + str(n) + \"factors\"))\n data_normalized, labels = shuffle(data_normalized, labelz, random_state=0)\n x = data_normalized\n y = labels\nelse:\n data_normalized = normalize(kmer_cnts, axis = 1, norm = 'l1')\n 
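# shuffle the normalized count matrix and its labels together so rows stay aligned\n    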
data_normalized, labels = shuffle(data_normalized, labelz, random_state=0)\n    x = data_normalized\n    y = labels\n\n\n## kmer or factor setup ##\nif model_type == 'lasso_nmf':\n    kmers_no_comp = []\n    for i in range(n):\n        kmers_no_comp.append(\"Factor\" + str(i) +\": \")\n    factor_imps = np.zeros(len(kmers_no_comp))\nelse:\n    kmers_no_comp = []\n    all_kmers_caps = [''.join(_) for _ in product(['A', 'C', 'G', 'T'], repeat = kmer_size)]\n    for kmer in all_kmers_caps:\n        if get_reverse_complement(kmer) not in kmers_no_comp:\n            kmers_no_comp.append(kmer)\n    kmer_imps = np.zeros(len(kmers_no_comp))\n\n\n## set up model ##\nif model_type == 'rf':\n    estimator = RandomForestClassifier(n_estimators=500, max_depth=None, min_samples_split=5, n_jobs=4, max_features='log2')\nelse:\n    estimator = LogisticRegression(penalty='l1', solver='saga', max_iter=10000, n_jobs=4)\n\n\n## run the model ##\nk_fold = RepeatedStratifiedKFold(n_splits=splits, n_repeats=repeats)\nfor train_i, test_i in k_fold.split(x, y):\n    x_train, y_train = x[train_i], y[train_i]\n    x_test, y_test = x[test_i], y[test_i]\n    use_norm = True\n    \n    if use_norm:\n        sample_mean = x_train.mean(axis=0)\n        sample_std = x_train.std(axis=0)\n        x_train = (x_train - sample_mean) / sample_std\n        x_test = (x_test - sample_mean) / sample_std\n    \n    y_train = np.array(y_train)\n    y_test = np.array(y_test)\n    \n    estimator.fit(x_train, y_train)\n    y_test_pred = np.array(estimator.predict_proba(x_test))\n    \n    if model_type == 'rf':\n        get_feature_importances(estimator, kmer_imps)\n    \n    elif model_type == 'lasso':\n        importances = estimator.coef_\n        for i in range(len(importances.T)):\n            kmer_imps[i] += abs(importances[0][i])\n    \n    elif model_type == 'lasso_nmf':\n        importances = estimator.coef_\n        for i in range(len(importances.T)):\n            factor_imps[i] += abs(importances[0][i])\n    \n\n    \n## get the important features ##\nif model_type == 'lasso_nmf':\n    imps = factor_imps\nelse:\n    imps = kmer_imps\nnum_features = -1\nnum_feature_imps = num_features\nif (num_feature_imps == -1):\n    num_feature_imps = len(imps)\nif imps is not None and num_feature_imps > 0:\n    indices = np.argsort(imps)[::-1][0:num_feature_imps]\n    imps = imps[indices]\n    kmers_no_comp = [kmers_no_comp[i] for i in indices]\n    out_file = open(graph_dir + \"/feat_imps_\" + str(model_type) + str(data_set) + str(kmer_size) + \"mers.txt\", \"w\")\n    for i in range(num_feature_imps):\n        if imps[i] > 0:\n            out_file.write(kmers_no_comp[i] + \"\\t\" + str(imps[i] / (splits * repeats)) + \"\\n\")\n    out_file.close()\n","repo_name":"19katz/microbiome-deep-learning","sub_path":"annamarie_models/finding_imp_feats_AEB.py","file_name":"finding_imp_feats_AEB.py","file_ext":"py","file_size_in_byte":7063,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"5038618677","text":"from fastapi import FastAPI\n\nfrom db.db_controller import get_respondent_from_str\n\n\napp = FastAPI()\n\n\n@app.get(\"/\")\nasync def index():\n    return {'message': 'privet'}\n\n\n@app.get(\"/getPercent\")\nasync def get_percent(audience1: str = '', audience2: str = ''):\n    \"\"\"Get the percentage of the second audience contained in the first, based on the average Weight\"\"\"\n    audience1 = await get_respondent_from_str(audience1)\n    audience2 = await get_respondent_from_str(audience2)\n    audience1_weight = set(map(lambda x: x[1], audience1))\n    audience2_weight = set(map(lambda x: x[1], audience2))\n\n    if audience1_weight:\n        percentage = sum(audience1_weight & audience2_weight) / sum(audience1_weight)\n    else:\n        percentage = 0\n\n    return 
{\"percent\": percentage * 100}\n\n\n","repo_name":"Tokareff2020/test_OKKAM","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72498871844","text":"# coding=utf-8\nfrom setuptools import setup, find_packages\n\nimport os\nINSTALL = ['assisted-service-client', 'prettytable', 'PyYAML']\ndescription = 'Assisted installer assistant'\nlong_description = description\nif os.path.exists('README.rst'):\n    long_description = open('README.rst').read()\n\nsetup(\n    name='aicli',\n    version='99.0',\n    include_package_data=True,\n    packages=find_packages(),\n    zip_safe=False,\n    description=description,\n    long_description=long_description,\n    url='http://github.com/karmab/assisted-installer-cli',\n    author='Karim Boumedhel',\n    author_email='karimboumedhel@gmail.com',\n    license='ASL',\n    install_requires=INSTALL,\n    entry_points='''\n    [console_scripts]\n    aicli=ailib.cli:cli\n    ''',\n)\n","repo_name":"karmab/aicli","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"52"} +{"seq_id":"4632294272","text":"import sqlite3\nfrom sqlite3 import Error\nimport numpy as np\n\n\ndef create_connection(db_file):\n    \"\"\" create a database connection to the SQLite database\n        specified by db_file\n    :param db_file: database file\n    :return: Connection object or None\n    \"\"\"\n    conn = None\n    try:\n        conn = sqlite3.connect(db_file)\n        return conn\n    except Error as e:\n        print(e)\n\n    return conn\n\n\ndef create_table(conn, create_table_sql):\n    \"\"\" create a table from the create_table_sql statement\n    :param conn: Connection object\n    :param create_table_sql: a CREATE TABLE statement\n    :return:\n    \"\"\"\n    try:\n        c = conn.cursor()\n        c.execute(create_table_sql)\n    except Error as e:\n        print(e)\n\n\ndef save_public_trade(conn, public_trade):\n    \"\"\"\n    Insert a new public trade into the public_trades table\n    :param conn: Connection object\n    :param public_trade: tuple of trade values in table-column order\n    :return: id of the last inserted row\n    \"\"\"\n    sql = ''' \n    INSERT INTO public_trades(sequence, instrument_code,price,amount,volume,taker_side, DateTime_UTC)\n    VALUES(?,?,?,?,?,?,?) \n    \n    '''\n    # WHERE NOT EXISTS (SELECT sequence FROM public_trades WHERE sequence = {}) LIMIT 1\n    # WHERE NOT EXISTS (SELECT primary-key FROM table-name WHERE primary-key = inserted-record) LIMIT 1\n    cur = conn.cursor()\n    cur.execute(sql, public_trade)\n    conn.commit()\n    return cur.lastrowid\n\n\ndef save_public_trade_df(conn, public_trade_df):\n    \"\"\"\n    Insert every row of a public-trade DataFrame into the public_trades table\n    :param conn: Connection object\n    :param public_trade_df: DataFrame of trades indexed by timestamp\n    :return: id of the last inserted row\n    \"\"\"\n\n    for row in public_trade_df.iterrows():\n        vals = row[1].values\n        indx = row[1].name\n        trades_as_array = np.insert(vals, 0, vals[-1])[:-1]\n        trades_as_array = np.append(trades_as_array, indx.replace(microsecond=0).to_pydatetime())\n        trades_as_tuple = tuple(trades_as_array)\n\n        sql = ''' \n        INSERT OR IGNORE INTO public_trades(sequence, instrument_code,price,amount,volume,taker_side, DateTime_UTC)\n        VALUES(?,?,?,?,?,?,?) '''\n        cur = conn.cursor()\n        cur.execute(sql, trades_as_tuple)\n        conn.commit()\n    return cur.lastrowid\n\n\ndef update_trade(conn, public_trade):\n    \"\"\"\n    Update all fields of a public trade identified by its sequence\n    :param conn: Connection object\n    :param public_trade: tuple of new values (in table-column order) followed by the sequence key\n    :return:\n    \"\"\"\n    sql = ''' UPDATE public_trades\n              SET sequence = ? ,\n                  instrument_code = ? ,\n                  price = ?,\n                  amount = ? 
,\n                  volume = ? ,\n                  taker_side = ? ,\n                  DateTime_UTC = ?\n              WHERE sequence = ?'''\n    cur = conn.cursor()\n    cur.execute(sql, public_trade)\n    conn.commit()\n\n\ndef select_all_trades(conn, tz_from=None, tz_until=None):\n    \"\"\"\n    Query all rows in the public_trades table and return them as a DataFrame indexed by DateTime_UTC\n    :param conn: the Connection object\n    :return: DataFrame of trades\n    \"\"\"\n    import datetime as dt\n    import pytz\n    import pandas as pd\n    cur = conn.cursor()\n    cur.execute(\"SELECT * FROM public_trades\")\n\n    columns = []\n    for col in cur.description:\n        columns.append(col[0])\n\n    rows = cur.fetchall()\n    df = pd.DataFrame(rows, columns=columns)\n\n    # df[\"DateTime_UTC\"] = df[\"DateTime_UTC\"].apply(lambda dt_utc: dt.datetime.strptime(dt_utc, \"%Y-%m-%d %H:%M:%S.%f\"))\n    df[\"DateTime_UTC\"] = df[\"DateTime_UTC\"].apply(lambda dt_utc: dt.datetime.strptime(dt_utc, \"%Y-%m-%d %H:%M:%S\"))\n    df.set_index(\"DateTime_UTC\", inplace=True, drop=True)\n\n    return df\n\n\ndef select_trade_by_sequence(conn, sequence):\n    \"\"\"\n    Query a trade by its sequence number\n    :param conn: the Connection object\n    :param sequence: primary key of the trade\n    :return: matching rows\n    \"\"\"\n    cur = conn.cursor()\n    cur.execute(\"SELECT * FROM public_trades WHERE sequence=?\", (sequence,))\n\n    rows = cur.fetchall()\n\n    for row in rows:\n        print(row)\n    return rows\n\n\ndef delete_trade(conn, sequence):\n    \"\"\"\n    Delete a trade by its sequence number\n    :param conn: Connection to the SQLite database\n    :param sequence: sequence of the trade to delete\n    :return:\n    \"\"\"\n    sql = 'DELETE FROM public_trades WHERE sequence=?'\n    cur = conn.cursor()\n    cur.execute(sql, (sequence,))\n    conn.commit()\n\n\ndef delete_all_trades(conn):\n    \"\"\"\n    Delete all rows in the public_trades table\n    :param conn: Connection to the SQLite database\n    :return:\n    \"\"\"\n    sql = 'DELETE FROM public_trades'\n    cur = conn.cursor()\n    cur.execute(sql)\n    conn.commit()\n\n\ndef drop_table(conn, table_name):\n    \"\"\"\n    Delete a table from db\n    :param conn: Connection to the SQLite database\n    :param table_name: Name of table to be removed\n    :return:\n    \"\"\"\n    sql = 'DROP TABLE {}'.format(table_name)\n    cur = conn.cursor()\n    cur.execute(sql)\n    conn.commit()\n\n\ndef get_table_column_names(conn, table_name):\n    \"\"\"\n    Get the column names of a table\n    :param conn: Connection to the SQLite database\n    :param table_name: Name of table to show the columns from\n    :return: list of column names\n    \"\"\"\n    # SQLite exposes table metadata through PRAGMA, not INFORMATION_SCHEMA\n    sql = 'PRAGMA table_info({})'.format(table_name)\n    cur = conn.cursor()\n    cur.execute(sql)\n    return [row[1] for row in cur.fetchall()]\n\n\ndatabase = r\"pythonsqlite.db\"\n\nclass tables():\n    public_trades_table = \"\"\" CREATE TABLE IF NOT EXISTS public_trades (\n                                        sequence INTEGER PRIMARY KEY,\n                                        instrument_code text NOT NULL,\n                                        price FLOAT,\n                                        amount FLOAT,\n                                        volume FLOAT,\n                                        taker_side text NOT NULL,\n                                        DateTime_UTC DATETIME\n                                    ); \"\"\"\n\n\"\"\"# create a database connection\nconn = create_connection(database)\n\ncreate_table(conn, tables.public_trades_table)\n# insert a new trade, values in table-column order:\n# (sequence, instrument_code, price, amount, volume, taker_side, DateTime_UTC)\na_trade = (\"1054563\", 'BTC_EUR', '27640.12', '0.00107', '29.5749284', 'SELL', '2021-01-31T12:06:00.694Z')\nproject_id = save_public_trade(conn, public_trade=a_trade)\nupdate_trade(conn, (\"1054563\", 'BTC_EUR', '27640.12123123', '0.00107', '29.5749284', 'SELL', '2021-01-31T12:06:00.694Z', '1054563'))\nselect_all_trades(conn)\ndelete_trade(conn, 
sequence=\"1054563\")\n# drop_table(conn, 'public_trades')\n\"\"\"","repo_name":"d4ddel/btc_trading","sub_path":"modules/local_db.py","file_name":"local_db.py","file_ext":"py","file_size_in_byte":6537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11787774526","text":"#!/usr/bin/python3\n\"\"\"\nModule that contains the add_attribute function\n\"\"\"\n\n\ndef add_attribute(obj, attr, value):\n    \"\"\"\n    function to add an attribute if possible\n    and raise TypeError if not possible\n    \"\"\"\n    if hasattr(obj, \"__dict__\"):\n        setattr(obj, attr, value)\n        return\n    raise TypeError(\"can't add new attribute\")\n","repo_name":"amanuelgthn/alx-higher_level_programming","sub_path":"0x0A-python-inheritance/101-add_attribute.py","file_name":"101-add_attribute.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"9780700972","text":"\"\"\"brotli handling component\n\nThis component is used to compress/decompress with brotli\"\"\"\n\nimport logging\nimport time\n\nimport brotli\n\nfrom nvp.nvp_component import NVPComponent\nfrom nvp.nvp_context import NVPContext\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_component(ctx: NVPContext):\n    \"\"\"Create an instance of the component\"\"\"\n    return BrotliHandler(ctx)\n\n\nclass BrotliHandler(NVPComponent):\n    \"\"\"BrotliHandler component class\"\"\"\n\n    def __init__(self, ctx: NVPContext):\n        \"\"\"Component constructor\"\"\"\n        NVPComponent.__init__(self, ctx)\n\n    def process_cmd_path(self, cmd):\n        \"\"\"Re-implementation of process_cmd_path\"\"\"\n\n        if cmd == \"compress\":\n            file = self.get_param(\"input_file\")\n            outfile = self.get_param(\"output_file\")\n            return self.compress_file(file, outfile)\n\n        if cmd == \"decompress\":\n            file = self.get_param(\"input_file\")\n            outfile = self.get_param(\"output_file\")\n            return self.decompress_file(file, outfile)\n\n        return False\n\n    def compress_file(self, input_file, output_file=None):\n        \"\"\"Compress a file\"\"\"\n        if output_file is None:\n            output_file = input_file + \".br\"\n\n        params = {\n            # 'mode': brotli.MODE_TEXT  # Set to brotli.MODE_TEXT for text-based files\n            \"mode\": brotli.MODE_GENERIC,\n            \"quality\": 11,\n            \"lgwin\": 22,\n            \"lgblock\": 0,\n        }\n\n        start_time = time.time()\n        content = self.read_binary_file(input_file)\n        logger.info(\"Compressing %s...\", input_file)\n        compressed = brotli.compress(content, **params)\n\n        # write the compressed data:\n        self.write_binary_file(compressed, output_file)\n\n        elapsed = time.time() - start_time\n        logger.info(\"Compressed %s in %.2fsecs\", input_file, elapsed)\n\n        return True\n\n    def decompress_file(self, input_file, output_file=None):\n        \"\"\"Decompress a file\"\"\"\n        if output_file is None:\n            output_file = self.set_path_extension(input_file, \"\")\n\n        start_time = time.time()\n        content = self.read_binary_file(input_file)\n        logger.info(\"Decompressing %s...\", input_file)\n        decompressed = brotli.decompress(content)\n\n        # write the decompressed data:\n        self.write_binary_file(decompressed, output_file)\n\n        elapsed = time.time() - start_time\n        logger.info(\"Decompressed %s in %.2fsecs\", input_file, elapsed)\n\n        return True\n\n\nif __name__ == \"__main__\":\n    # Create the context:\n    context = NVPContext()\n\n    # Add our component:\n    comp = context.register_component(\"BrotliHandler\", BrotliHandler(context))\n\n    psr = context.build_parser(\"compress\")\n    psr.add_str(\"input_file\")(\"File to compress\")\n    
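# optional output path: when omitted, compress_file appends .br to the input name\n    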
psr.add_str(\"-o\", \"--output\", dest=\"output_file\")(\"Output destination for compress\")\n    psr = context.build_parser(\"decompress\")\n    psr.add_str(\"input_file\")(\"File to decompress\")\n    psr.add_str(\"-o\", \"--output\", dest=\"output_file\")(\"Output destination for decompress\")\n\n    comp.run()\n","repo_name":"roche-emmanuel/nervproj","sub_path":"nvp/admin/brotli_handler.py","file_name":"brotli_handler.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"15614977477","text":"from pathlib import Path\nimport csv\nimport datetime\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport matplotlib.dates as mdates\n\nclass Place:\n    ''' That place's plot. '''\n\n    def __init__(self, _CSV_FILE, _NAME):\n        ''' Initialize main attributes. '''\n\n        _PATH = Path(_CSV_FILE)\n        _READER = csv.reader(_PATH.read_text().splitlines())\n        _HEADER_ROW = next(_READER)\n\n        _DATES_INDEX = _HEADER_ROW.index('DATE')\n        _HIGHS_INDEX = _HEADER_ROW.index('TMAX')\n        _LOWS_INDEX = _HEADER_ROW.index('TMIN')\n\n        _dates, self.highs, self.lows = [], [], []\n        for _row in _READER:\n            try:\n                _date = datetime.datetime.strptime(_row[_DATES_INDEX], '%Y-%m-%d')\n                _high = int(_row[_HIGHS_INDEX])\n                _low = int(_row[_LOWS_INDEX])\n            except ValueError:\n                # report the raw row value: _date may not be bound if strptime itself failed\n                print(f'Missing data for {_row[_DATES_INDEX]} at {_NAME}.')\n            else:\n                _dates.append(_date)\n                self.highs.append(_high)\n                self.lows.append(_low)\n        _FIRST_DAY, _last_day = _dates[0], _dates[-1]\n        _last_day += datetime.timedelta(days=1)\n\n        _fig, self._ax = plt.subplots()\n        _fig.canvas.manager.set_window_title(f'{_NAME}, 2021')\n        self._ax.plot(_dates, self.highs, c='red', alpha=0.5)\n        self._ax.plot(_dates, self.lows, c='blue', alpha=0.5)\n        self._ax.fill_between(_dates, self.highs, self.lows, facecolor='blue', alpha=0.1)\n\n        self._ax.set_title('Daily high and low temperatures')\n        self._ax.set_ylabel('Temperature (F)')\n        _fig.set_figheight(8)\n        _fig.set_figwidth(15)\n\n        self._ax.xaxis.set_major_locator(mdates.MonthLocator())\n        self._ax.xaxis.set_minor_locator(mdates.MonthLocator(bymonthday=16))\n        self._ax.xaxis.set_major_formatter(ticker.NullFormatter())\n        self._ax.xaxis.set_minor_formatter(mdates.DateFormatter('%b'))\n        # hide the minor tick marks themselves, keeping only their month labels\n        for tick in self._ax.xaxis.get_minor_ticks():\n            tick.tick1line.set_markersize(0)\n            tick.tick2line.set_markersize(0)\n        plt.xlim(_FIRST_DAY, _last_day)\n\n    def set_ylims(self, MIN, MAX):\n        self._ax.set_ylim(MIN, MAX)\n    \n    def show(self):\n        plt.show()\n\n_sitka = Place('weather_data/sitka_weather_2021_simple.csv', 'Sitka')\n_death_valley = Place('weather_data/death_valley_2021_simple.csv',\n                      'Death Valley')\n\nMIN = min(min(_sitka.lows), min(_death_valley.lows))\nMAX = max(max(_sitka.highs), max(_death_valley.highs))\n\n_sitka.set_ylims(MIN, MAX)\n_death_valley.set_ylims(MIN, MAX)\n\nplt.show()","repo_name":"LucasDondo/python_crash_course","sub_path":"Part 2/Project 2. Data Visualization/Chapter 16. 
Downloading Data/2_sitka_death_valley_comparison.py","file_name":"2_sitka_death_valley_comparison.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"71329267636","text":"from django.contrib.auth import get_user_model\n# from django.http import response\nfrom django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\n\nfrom src.users.models import UserAddress\nfrom ..models import Task\nfrom ..serializers import TaskSerializer\n\n\nUser = get_user_model()\n\n\nclass TaskTests(APITestCase):\n\n def setUp(self):\n self.url = reverse('task-list')\n\n self.user_1 = User.objects.create_user(\n mobile='+79876543210',\n password='test_password',\n )\n\n self.user_1_address = UserAddress.objects.create(\n street='Lenina',\n house='1',\n building='1',\n apartment='1',\n user=self.user_1,\n )\n\n self.user_2 = User.objects.create_user(\n mobile='+77766655544',\n password='test_password',\n )\n\n self.task_1 = Task.objects.create(\n subject='First subject',\n text='First text',\n surname='Lenin',\n name='Vladimir',\n patronymic='Ilyich',\n phone='+79876543210',\n email='lenin@mausoleum.su',\n user=self.user_1,\n address=self.user_1_address,\n attachment=None\n )\n\n self.task_2 = Task.objects.create(\n subject='Second subject',\n text='Second text',\n surname='Lenin',\n name='Vladimir',\n patronymic='Ilyich',\n phone='+79876543210',\n email='lenin@mausoleum.su',\n user=self.user_1,\n address=self.user_1_address,\n attachment=None\n )\n\n self.url_detail = reverse('task-detail', args=[self.task_2.id])\n\n self.client.force_authenticate(user=self.user_1)\n\n def test_retrieve_tasks(self):\n tasks = Task.objects.all()\n serializer = TaskSerializer(tasks, many=True)\n response = self.client.get(self.url)\n\n self.assertEqual(response.data, serializer.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_create_tasks(self):\n data = {\n \"subject\": \"Third subject\",\n \"text\": \"Third text\",\n \"surname\": \"Kerensky\",\n \"name\": \"Alexander\",\n \"patronymic\": \"Fyodorovich\",\n \"phone\": \"+79626320009\",\n \"email\": \"kerensky@example.com\",\n \"attachment\": \"\",\n\n \"address.street\": \"Goncharova\",\n \"address.house\": \"2\",\n \"address.building\": \"2\",\n \"address.apartment\": \"2\"\n }\n\n response = self.client.post(\n self.url,\n data=data,\n )\n\n self.assertEqual(status.HTTP_201_CREATED, response.status_code)\n self.assertEqual(3, Task.objects.count())\n self.assertEqual(self.user_1, Task.objects.last().user)\n\n def test_update_tasks(self):\n data = {\n \"subject\": \"Third subject updated\",\n \"address.house\": \"99\",\n }\n\n response = self.client.patch(self.url_detail, data=data)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n self.task_2.refresh_from_db()\n\n self.assertEqual(self.task_2.subject, data['subject'])\n self.assertEqual(self.task_2.address.house, data['address.house'])\n\n def test_update_tasks_not_owner(self):\n data = {\n \"subject\": \"Third subject updated by user_2\",\n }\n\n self.client.force_authenticate(user=self.user_2)\n\n response = self.client.patch(self.url_detail, data=data)\n\n self.assertEqual(response.status_code, 
status.HTTP_404_NOT_FOUND)\n","repo_name":"iNgredie/spring2021","sub_path":"src/tasks/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"39816361341","text":"# -*- coding: utf-8 -*-\n\nfrom flask import Blueprint, abort, jsonify, request\nfrom flask_login import login_required\n\nfrom ..models.cities import City\n\nbp = Blueprint('Cities', __name__, url_prefix='/cidades')\n\n\n@bp.route('/')\n@login_required\ndef cities():\n if not request.is_xhr:\n abort(404)\n search = request.args.get('search', '', type=str)\n pagination = City.fetch(search, '', '', 1)\n cities = pagination.items\n\n return jsonify({'result': [c.serialize() for c in cities]})\n","repo_name":"Vsg5662/syscemit","sub_path":"app/views/cities.py","file_name":"cities.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"17141668080","text":"import numpy as np\nimport pandas as pd\nimport sys\nimport os\nfrom os import sys, path\nfrom scipy.sparse import csr_matrix\nfrom scipy.io import mmread\n\nprint(len(sys.argv))\nassert len(sys.argv) == 4 or len(sys.argv) == 6 or len(sys.argv) == 7, \"parameters needed: rna path, result folder\"\nrna_path = str(sys.argv[1])\natac_path = str(sys.argv[2])\nresult_folder = str(sys.argv[3])\nif len(sys.argv) >= 6:\n subset_rna = str(sys.argv[4])\n if subset_rna == '!':\n subset_rna = None\n subset_atac = str(sys.argv[5])\n if subset_atac == '!':\n subset_atac = None\nelse:\n subset_rna = None\n subset_atac = None\nif len(sys.argv) >= 7:\n rna_new_annot = sys.argv[6]\n if rna_new_annot == '!':\n rna_new_annot = None\nelse:\n rna_new_annot = None\n\ndef read_txt_np(filename):\n with open(filename) as file:\n lines = file.readlines()\n lines = [line.rstrip() for line in lines]\n return np.array(lines) \n\ndef run_random(rna_path, atac_path, result_folder, subset_rna, subset_atac, rna_new_annot):\n if not os.path.exists(result_folder):\n os.makedirs(result_folder)\n if result_folder[-1] != '/':\n result_folder += '/'\n\n rna_cell_names = read_txt_np(path.join(rna_path, 'cells.txt'))\n atac_cell_names = read_txt_np(path.join(atac_path, 'cells.txt'))\n if rna_new_annot is not None:\n rna_label = read_txt_np(rna_new_annot)\n else:\n rna_label = read_txt_np(path.join(rna_path, 'annotations.txt'))\n \n ## subset\n if subset_rna is not None:\n subset_rna_barcodes = read_txt_np(subset_rna)\n ids_series = pd.Series(np.arange(len(rna_cell_names)), index=rna_cell_names)\n idx_bc_rna = ids_series[subset_rna_barcodes]\n rna_label = rna_label[idx_bc_rna]\n rna_cell_names = subset_rna_barcodes\n if subset_atac is not None:\n subset_atac_barcodes = read_txt_np(subset_atac)\n ids_series = pd.Series(np.arange(len(atac_cell_names)), index=atac_cell_names)\n idx_bc_atac = ids_series[subset_atac_barcodes]\n atac_cell_names = subset_atac_barcodes\n \n dic = {}\n for ct in set(rna_label):\n dic[ct] = np.sum(rna_label == ct)/len(rna_label)\n prob = np.array(list(dic.values())).repeat(len(atac_cell_names)).reshape(-1,len(atac_cell_names)).transpose()\n prob = pd.DataFrame(prob, index=atac_cell_names, columns=list(dic.keys()))\n prob.to_csv(result_folder+'prob.csv')\n # predicted label\n pred = prob.idxmax(axis=1)\n pred.to_csv(result_folder+'pred.csv')\n\nrun_random(rna_path, atac_path, result_folder, subset_rna, subset_atac, 
rna_new_annot)","repo_name":"AprilYuge/ATAC-annotation-benchmark","sub_path":"method_running/rand.py","file_name":"rand.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"4"} +{"seq_id":"74092046198","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 30 09:28:38 2023\n\n@author: guill\n\"\"\"\n\nimport cv2\nimport numpy as np\n\n# Load\n\nhadamard_basis_0 = np.load('hadamard/wh0_0_32.npy')\n\nif np.max(hadamard_basis_0) == 1:\n    \n    hadamard_basis_0 = 10 + (180 * hadamard_basis_0)\n\n# Arrange data\n\nhs = hadamard_basis_0.shape[0] # hadamard size\nmw = hs * 16 // 9 # matrix width (squares to fill the screen)\n\nmatrix_i = np.uint8(np.ones((hs, mw)) * 10) # generate matrix to be shown\n\n# Show on window\n\na = 3\n\nif a == 0:\n    \n    cv2.namedWindow('singleWH',cv2.WND_PROP_FULLSCREEN)\n    cv2.setWindowProperty('singleWH',cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)\n    \n    matrix_i[:,(mw - hs) // 2 : (mw + hs) // 2] = hadamard_basis_0[:,:,0] \n    \n    cv2.imshow('singleWH', matrix_i)\n    \nelif a == 1:\n    \n    cv2.namedWindow('singleWH',cv2.WND_PROP_FULLSCREEN)\n    cv2.setWindowProperty('singleWH',cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)\n    \n    c = 0\n    \n    while True:\n        \n        matrix_i[:,(mw - hs) // 2 : (mw + hs) // 2] = hadamard_basis_0[:,:,c] \n        \n        cv2.imshow('singleWH', matrix_i)\n        \n        cv2.waitKey(1010//30)\n        \n        c += 1\n        \n        if cv2.waitKey(1) & 0xFF == ord('q'):\n            \n            break\n\nelif a == 2:\n    \n    import pygame\n    import sys  # needed for sys.exit() in the event loop below\n    from pygame.locals import *\n    \n    pygame.init()\n    \n    WIDTH = 1920\n    HEIGHT = 1080\n    \n    windowSurface = pygame.display.set_mode((WIDTH, HEIGHT), 0, 32)\n    \n    img = pygame.image.load(\"test.jpeg\")\n    \n    while True:\n        \n        events = pygame.event.get()\n        \n        for event in pygame.event.get():\n            \n            if event.type == QUIT:\n                \n                pygame.quit()\n                sys.exit()\n        \n        windowSurface.blit(img, (0, 0)) #Replace (0, 0) with desired coordinates\n        \n        pygame.display.flip()\n\n    \nelif a == 3:\n    \n    import whdynamic as wh\n    \n    order = 11\n    \n    sqrtN = 2 ** (order - 1)\n    \n    W = wh.W(order)\n    \n    hs = sqrtN # hadamard size\n    mw = hs * 16 // 9 # matrix width (squares to fill the screen)\n    \n    matrix_i = np.uint8(np.zeros((hs, mw))) # generate matrix to be shown\n    \n    cv2.namedWindow('singleWH',cv2.WND_PROP_FULLSCREEN)\n    cv2.setWindowProperty('singleWH',cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)\n    \n    for i in range(sqrtN):\n        \n        for j in range(sqrtN):\n            \n            matrix_i[:,(mw - hs) // 2 : (mw + hs) // 2] = 255 * wh.hadamard_ij(W, i, j, invert = False) \n            \n            cv2.imshow('singleWH', matrix_i)\n            \n            cv2.waitKey(1000//1000)\n    \n    \n\n\n\n","repo_name":"Willy8m/Single-Pixel-Imaging","sub_path":"03_ShowWH_matrices.py","file_name":"03_ShowWH_matrices.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"3880064983","text":"import argparse\nfrom rename import load_data\nimport pymysql\nfrom update_objects import update_objects\nfrom PIL import Image\n\n\ndef main(args):\n    path = r'F:\\20220104泰州部署\\jyzPS\\inputImg'\n    path1 = r'F:\\20220104泰州部署\\jyzPS\\output'\n    path2 = r'F:\\20220104泰州部署\\jyzPS\\output_json'\n    load_data(path,path1, path2)\n    #load_data(args.img_org_dir,args.img_output_dir,args.json_output_dir) # import the data under the script's data directory (the data directory may contain xml, json and other annotation files)\n    #update_objects(args.img_org_dir,args.json_output_dir) # after already-imported data is annotated, merge the xml/json content into the image's associated json\n\n\n    # xml_path=r'E:\\入库器\\已入库的xml更新'\n    # json_path=r'C:\\Users\\zzc\\Desktop\\待更新json\\json'\n    # update_objects(xml_path, 
json_path)\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--img_org_dir', type=str, default='data1')     # original directory of the data to import\n    parser.add_argument('--img_output_dir', type=str, default='output1')    # output directory for the renamed images\n    parser.add_argument('--json_output_dir', type=str, default='output_json1')   # output directory for the json files matching the renamed images\n    opt = parser.parse_args()\n    assert opt.img_output_dir != opt.img_org_dir, \"Please check that the output directory is correct!\"\n    main(opt)","repo_name":"newjokker/saturn_database","sub_path":"db_tools/jiahao_secret/data_loading.py","file_name":"data_loading.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"23321787054","text":"import re\nimport time\nimport logging\nimport typing as tp\n\nfrom revizor2 import CONF\nfrom revizor2.api import Farm, IMPL, Role, FarmRole\nfrom revizor2.consts import BEHAVIORS_ALIASES, DATABASE_BEHAVIORS, Dist\nfrom revizor2.exceptions import NotFound\nfrom revizor2.helpers import farmrole\nfrom revizor2.helpers.roles import get_role_versions\nfrom scalarizr.lib.defaults import Defaults\nfrom scalarizr.lib import cloud_resources as lib_resources\n\nLOG = logging.getLogger(__name__)\n\n\ndef clear(farm: Farm):\n    farm.roles.reload()\n    if len(farm.roles):\n        LOG.info('Clear farm roles')\n        IMPL.farm.clear_roles(farm.id)\n    farm.vhosts.reload()\n    for vhost in farm.vhosts:\n        LOG.info(f'Delete vhost: {vhost.name}')\n        vhost.delete()\n    try:\n        farm.domains.reload()\n        for domain in farm.domains:\n            LOG.info(f'Delete domain: {domain.name}')\n            domain.delete()\n    except Exception:\n        pass\n\n\ndef add_role_to_farm(context: dict,\n                     farm: Farm,\n                     behavior: str = None,\n                     dist: str = None,\n                     role: Role = None,\n                     role_name: str = None,\n                     role_options: tp.List[str] = None,\n                     alias: str = None) -> FarmRole:\n    behavior = (behavior or CONF.feature.behavior).strip()\n    role_name = (role_name or '').strip()\n    if role:\n        role_id = role.id #FIXME: Use Role object below\n    else:\n        role_id = CONF.feature.role_id or context.get(f'{role_name}_id', None)\n    if role_options:\n        LOG.debug(f'Additional role options: {role_options}')\n    if role_id:\n        if not isinstance(role_id, int) and not role_id.isdigit():\n            raise AssertionError('Role environment variable can\'t be only in digit format')\n        LOG.info(f'Get role by id: {role_id}')\n        role = IMPL.role.get(role_id)\n    else:\n        role = get_role_by_behavior(behavior, dist=dist)\n    if not role:\n        raise NotFound('Role with id or by mask \"%s\" not found in Scalr' % (\n            role_id or behavior))\n\n    # world.wrt(etree.Element('meta', name='role', value=role['name']))\n    # world.wrt(etree.Element('meta', name='dist', value=role['dist']))\n    previously_added_roles = [r.id for r in farm.roles]\n\n    alias = alias or role['name']\n    LOG.info(f'Add role {role[\"id\"]} with alias {alias} to farm')\n    role_params = setup_farmrole_params(\n        context,\n        farm,\n        role_options=role_options,\n        alias=alias,\n        behaviors=behavior)\n\n    farm.add_role(role['id'], options=role_params.to_json())\n    time.sleep(5)\n    farm.roles.reload()\n    added_role = [r for r in farm.roles if r.id not in previously_added_roles]\n\n    if not added_role:\n        raise AssertionError(f'Added role \"{role[\"name\"]}\" not found in farm')\n    LOG.debug(f'Save role object with name {added_role[0].alias}')\n    context[f'{added_role[0].alias}_role'] = added_role[0]\n    context[f'role_params_{added_role[0].id}'] = role_params\n    return added_role[0] #TODO: Scalr return addedFarmRoleIds\n\n\ndef get_role_by_behavior(behavior, dist: str = None) -> 
dict:\n behavior = BEHAVIORS_ALIASES.get(behavior, behavior)\n dist = Dist(dist) if dist else CONF.feature.dist\n use_cloudinit_role = '-cloudinit' in behavior\n role_type = CONF.feature.role_type\n\n if use_cloudinit_role:\n dist_mask = dist.id\n role_ver_tpl = 'tmp-{beh}-{dist}-*-*'\n role_name_tpl = 'tmp-{beh}-{dist}-{ver}'\n else:\n dist_mask = dist.mask\n role_ver_tpl = '{beh}*-{dist}-{type}'\n role_name_tpl = '{beh}{ver}-{dist}-{type}'\n\n role_ver_mask = role_ver_tpl.format(\n beh=behavior,\n dist=dist_mask,\n type=role_type)\n LOG.info(f'Get role versions by mask: {role_ver_mask}')\n\n role_version = get_role_versions(role_ver_mask, use_latest=True)\n role_name = role_name_tpl.format(\n beh=behavior,\n dist=dist_mask,\n ver=role_version,\n type=role_type)\n LOG.info(f'Get role by name: {role_name}')\n roles = IMPL.role.list(dist=dist.dist, query=role_name)\n if roles:\n return roles[0]\n raise NotFound('Role with name: %s not found in Scalr' % role_name)\n\n\ndef setup_farmrole_params(context: dict,\n farm: Farm,\n role_options: tp.List[str] = None,\n alias: str = None,\n behaviors: tp.Union[str, tp.List[str]] = None,\n setup_bundled_role: bool = False) -> farmrole.FarmRoleParams:\n platform = CONF.feature.platform\n dist = CONF.feature.dist\n behaviors = behaviors or []\n role_options = role_options or []\n role_params = farmrole.FarmRoleParams(platform, alias=alias)\n\n if isinstance(behaviors, str):\n behaviors = [behaviors]\n\n if not (setup_bundled_role and len(f'{farm.name}-{alias}') < 63):\n Defaults.set_hostname(role_params)\n\n if CONF.feature.platform.is_vmware:\n if 'vmware-scalr-auto' in role_options:\n placement_strategy = role_options.pop().split('-', 1)[1]\n else:\n placement_strategy = 'manual'\n Defaults.set_vmware_attributes(\n role_params,\n placement_strategy=placement_strategy)\n\n for opt in role_options:\n LOG.info(f'Inspect role option: {opt}')\n if opt in ('branch_latest', 'branch_stable'):\n role_params.advanced.agent_update_repository = opt.split('_')[1]\n elif 'redis processes' in opt:\n redis_count = re.findall(r'(\\d+) redis processes', opt)[0].strip()\n LOG.info(f'Setup {redis_count} redis processes')\n role_params.database.redis_processes = int(redis_count)\n elif 'chef-solo' in opt:\n Defaults.set_chef_solo(role_params, opt)\n elif 'chef-hostname' in opt:\n Defaults.set_chef_hostname(role_params, context.get('chef_hostname_for_cookbook'))\n elif 'efs' in opt:\n Defaults.set_efs_storages(role_params, context.get('linked_services'))\n elif 'ansible-tower' in opt:\n Defaults.set_ansible_tower(role_params, context)\n elif 'ansible-orchestration' in opt:\n Defaults.set_ansible_orchestration(role_params, context)\n else:\n Defaults.apply_option(role_params, opt)\n\n if not setup_bundled_role:\n if dist.is_windows:\n role_params.advanced.reboot_after_hostinit = True\n # elif dist.id == 'scientific-6-x' or \\\n # (dist.id in ['centos-6-x', 'centos-7-x'] and platform.is_ec2):\n # role_params.advanced.disable_iptables_mgmt = False\n\n if platform.is_ec2:\n role_params.global_variables.variables.append(\n role_params.global_variables,\n farmrole.Variable(\n name='REVIZOR_TEST_ID',\n value=context['test_id']\n )\n )\n if 'rabbitmq' in behaviors:\n role_params.network.hostname_template = ''\n\n if any(b in DATABASE_BEHAVIORS for b in behaviors):\n LOG.debug('Setup default db storages')\n Defaults.set_db_storage(role_params)\n if 'redis' in behaviors:\n LOG.info('Insert redis settings')\n snapshotting_type = CONF.feature.redis_snapshotting\n 
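# apply the snapshotting strategy configured for this test run\n            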
role_params.database.redis_persistence_type = snapshotting_type\n role_params.database.redis_use_password = True\n\n return role_params\n\n\ndef link_efs_cloud_service_to_farm(farm: Farm, efs: dict) -> bool:\n \"\"\"Link an Amazon efs to farm\n\n @type farm: Farm\n @param farm:\n\n @type efs: dict\n @param efs: cloud object details\n \"\"\"\n service_params = dict(\n service_type='efs',\n cloud_id=efs['fileSystemId'],\n name=efs['name']\n )\n res = IMPL.farm.link_cloud_service(farm_id=farm.id, **service_params)\n LOG.info(f'Link an Amazon efs {efs[\"name\"]}:[{efs[\"fileSystemId\"]}] to farm [{farm.id}]. {res[\"successMessage\"]}')\n return res['success']\n\n\ndef remove_cloud_resources_linked_to_farm(farm: Farm):\n \"\"\"Remove cloud resources linked to Farm\n\n @type farm: Farm\n @param farm:\n \"\"\"\n linked_services = IMPL.farm.get_settings(farm.id)['farm']['services']\n LOG.info(f\"Linked to farm [{farm.id}] cloud services: {linked_services}\")\n for service in linked_services:\n method = getattr(lib_resources, f\"delete_{service['type']}\", None)\n if method:\n LOG.info(f\"Remove {service['type']} service {service['cloudObjectId']} from {service['platform']} cloud\")\n IMPL.farm.unlink_cloud_service(farm.id, service['cloudObjectId'])\n method(\n cloud_id=service['cloudObjectId'],\n cloud_location=service['cloudLocation'],\n cloud_name=service['name']\n )\n\n\ndef get_farm_state(farm: Farm, state: str):\n farm = Farm.get(farm.id)\n if farm.status == state:\n return True\n else:\n raise AssertionError('Farm is Not in %s state' % state)\n","repo_name":"Scalr/revizor-tests","sub_path":"scalarizr/lib/farm.py","file_name":"farm.py","file_ext":"py","file_size_in_byte":9064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"25467713192","text":"import re\nfrom numpy import arange\n\nfrom .tablatexlib import traduire_latex, maths\nfrom ...pylib import print_error\nfrom ...mathlib.custom_functions import round_afz\nfrom ... 
import param\n\ndef _eval_math(chaine):\n    return float(eval(chaine, maths.__dict__))\n\n\ndef _auto_tabval(chaine='', formatage_antecedents='VAL', formatage_images='VAL', precision=0.01):\n\n    chaine_initiale = chaine\n\n    chaine = chaine.replace(param.separateur_decimal, \".\")\n\n\n    m = re.match(r\"(?P<fonction>[^:]+)\" # \"f(x)=2x+3\"\n                 r\"[ ]+sur[ ]+\" # \" sur \"\n                 r\"\\[(?P<intervalle>[^]]+)\\]\" # \"[-5;5]\"\n                 r\"(.*[ ]+pas[ ]+(de[ ]+)?\" # \" avec un pas de \"\n                 r\"(?P<pas>[0-9.]+))?\", # \"0,01\"\n                 chaine)\n\n    # NOTE: for now, only an interval of the form [a;b] is supported.\n\n    if m is None:\n        raise ValueError(\"Incorrect format.\")\n\n    fonction = m.group('fonction')\n    intervalle = m.group('intervalle')\n    pas = _eval_math(m.group('pas') or '1')\n\n\n    # Automatic correction: [-5,5] is replaced by [-5;5]\n    # (careful: every comma has already been converted to a point above!)\n    if ';' not in intervalle:\n        intervalle = intervalle.replace('.', ';')\n    a, b = intervalle.split(';')\n    a = _eval_math(a)\n\n    code = \"%s:[%s]:%s,%s..%s\" % (fonction, precision, a, a + pas, b)\n\n    if param.debug and param.verbose:\n        print('Code TABVal:', code)\n    return tabval(code, formatage_antecedents=formatage_antecedents, formatage_images=formatage_images) + '% ' + chaine_initiale + '\\n'\n\n\n\ndef tabval(chaine='', formatage_antecedents='VAL', formatage_images='VAL', precision=0.01):\n    r\"\"\"Syntax:\nfunction: [rounding precision]: first value,second value..final value\n\nExamples:\n\\sin(x): -5,-4.9..5\nh(x)=sin(x)+1: [0.01]: -5,-4.5..0 ; 0,1..3\n\nUse ; to separate several blocks of values, and // to indicate\na line break (if the table is too long).\n\n`formatage_images` may contain a formula used to format the\nvalues, for example '\\nombre{VAL}'. The variable VAL corresponds to the\nvalue of the result.\n\"\"\"\n\n# f(x)=x+4:-5,-4..0 ; 2 ; 5,7..10// 12,14..20\n# f(x)=x+4:-5..-4..0; 2; 5..7..10// 12,14..20\n# f(x)=x+4:-5 -4+1..0\n\n    chaine_originale = chaine = chaine.strip()\n\n    if ':' not in chaine:\n        return _auto_tabval(chaine, formatage_antecedents=formatage_antecedents,\n                            formatage_images=formatage_images,\n                            precision=precision)\n\n    chaine = chaine.replace(r'\\\\', '\\n').replace('//', '\\n')\n    sequence = chaine.split(\":\", 2)\n\n    if len(sequence) == 3:\n        precision = _eval_math(sequence[1].strip('[] '))\n\n    legende = [txt.strip() for txt in sequence[0].split(\"=\", 1)]\n    if len(legende) == 2:\n        fonction, expression = legende\n        # guess the variable (normally it sits between the parentheses)\n        deb = fonction.find(\"(\")\n        fin = fonction.find(\")\")\n        if deb == -1:\n            variable = \"x\"\n        else:\n            variable = fonction[deb+1:fin].strip()\n    else:\n        fonction = expression = legende[0]\n        # the variable still has to be guessed:\n        # look for isolated letters (except 'e', which stands for exp(1))\n        m = re.search('(?
0:\n return str(a)\n return \"\"\n\ndef getRandomTurn(location, previousDirection, length, l):\n if location == 0:\n if length == l-1:\n return [r.choice([\"W\", \"S\"]),5]\n elif previousDirection == \"S\":\n return [\"E\", 1]\n elif previousDirection == \"W\":\n return [\"N\", 3]\n else:\n opt = [[\"N\",3],[\"E\",1]]\n return r.choice(opt)\n\n elif location == 1:\n if length == l-1:\n return [r.choice([\"E\", \"S\"]),5]\n elif previousDirection == \"S\":\n return [\"W\", 0]\n elif previousDirection == \"E\":\n return [\"N\", 2]\n else:\n opt = [[\"N\",2],[\"W\",0]]\n return r.choice(opt)\n\n elif location == 2:\n if length == l-1:\n return [r.choice([\"N\", \"E\"]),5]\n elif previousDirection == \"N\":\n return [\"W\", 3]\n elif previousDirection == \"E\":\n return [\"S\", 1]\n else:\n opt = [[\"S\",1],[\"W\",3]]\n return r.choice(opt)\n\n elif location == 3:\n if length == l-1:\n return [r.choice([\"W\", \"N\"]),5]\n elif previousDirection == \"W\":\n return [\"S\", 0]\n elif previousDirection == \"N\":\n return [\"E\", 2]\n else:\n opt = [[\"S\",0],[\"E\",2]]\n return r.choice(opt)\n\n\n\n \n","repo_name":"angieexu/MTHE-493-TrafficLights","sub_path":"tools/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"4"} +{"seq_id":"673402153","text":"import datetime\nimport endpoints\nfrom google.appengine.ext import ndb\nimport logging\nimport json\n\nfrom google.appengine.ext.ndb import blobstore\n\nfrom anatel_api_messages import BlobResponseMessage\nfrom anatel_api_messages import EDSuccessResponseMessage, ExecutiveDirectorGetResponseMessage\nfrom anatel_api_messages import ExecutiveDirectorsGetResponseMessage\nfrom anatel_api_messages import EDProfessionalHistoryGetResponseMessage\n\nTIME_FORMAT_STRING = '%d/%m/%Y'\n\n\nclass ExecutiveDirectors(ndb.Expando):\n \"\"\"\n Models the Anatel Executive Directors Border\n Root Property\n \"\"\"\n directorName = ndb.StringProperty(required=True)\n directorPhotoKey = ndb.StringProperty()\n placeOfBirth = ndb.StringProperty()\n mandates = ndb.JsonProperty(required=True)\n appointment = ndb.IntegerProperty(required=True)\n biography = ndb.TextProperty(compressed=False)\n refLinks = ndb.JsonProperty(required=False)\n politicalPartyAffinity = ndb.StringProperty(compressed=False)\n\n @property\n def timestamp(self):\n \"\"\"\n Property to format a datetime object to string.\n \"\"\"\n return self.join_date.strftime(TIME_FORMAT_STRING)\n\n def to_message(self):\n\n query_result = {'directorId': self.key.id(),\n 'directorName': self.directorName,\n 'directorPhotoKey': self.directorPhotoKey,\n 'placeOfBirth': self.placeOfBirth,\n 'mandates': self.mandates,\n 'appointment': self.appointment,\n 'biography': self.biography,\n 'refLinks': self.refLinks,\n 'politicalPartyAffinity': self.politicalPartyAffinity}\n\n return ExecutiveDirectorGetResponseMessage(queryResult=json.dumps(query_result))\n\n\n def to_success_message(self):\n return EDSuccessResponseMessage(entityKey=self.key.id(),\n serverResponse=self.serverResponse\n )\n\n @staticmethod\n def directors_to_message(directors):\n #logging.getLogger().setLevel(logging.INFO)\n #logging.info('%s' % directors)\n\n computed_directors = []\n for director in directors:\n #logging.info('%s' % director)\n director_dict = {'directorId': director.key.id(),\n 'directorName': director.directorName,\n 'directorPhotoKey': director.directorPhotoKey,\n 'placeOfBirth': director.placeOfBirth,\n 'mandates': 
json.dumps(director.mandates),\n 'appointment': director.appointment,\n 'biography': director.biography,\n 'refLinks': json.dumps(director.refLinks),\n 'politicalPartyAffinity': director.politicalPartyAffinity}\n computed_directors.append(director_dict)\n\n message_json = json.dumps(computed_directors, sort_keys=False, indent=0)\n\n #logging.info('message_json')\n #logging.info('%s' % computed_directors.__str__())\n return ExecutiveDirectorsGetResponseMessage(queryResult=message_json)\n\n @staticmethod\n def to_director_message(data):\n return json.dumps(data, sort_keys=True, indent=4)\n\n @classmethod\n def get_directors(cls, message):\n #logging.info(message.filter)\n\n directors_filters = json.loads(message.filter)\n if not directors_filters:\n directors_filters = [0]\n q = cls.query()\n q = q.filter(cls.appointment.IN(directors_filters))\n q = q.order(cls.directorName)\n\n return q.fetch(100)\n\n @classmethod\n def get_director(cls, message):\n\n director_id = message.directorId\n return cls.get_by_id(director_id)\n\n\nclass ProfessionalHistory(ndb.Expando):\n \"\"\"\n Models the Executive Director's professional history\n ExecutiveDirectors child\n\n poControl => position was taken after mandate at Anatel?\n piControl => is this a private company?\n \"\"\"\n institutionName = ndb.StringProperty(required=True)\n position = ndb.StringProperty(required=True)\n admissionDate = ndb.DateProperty(required=True)\n exitDate = ndb.DateProperty(required=True)\n poControl = ndb.BooleanProperty(required=True)\n piControl = ndb.BooleanProperty(required=True)\n\n def to_history_message(self):\n return EDProfessionalHistoryGetResponseMessage(directorKey=self.directorKey,\n entityKey=self.key.id(),\n professionalHistory=self.professionalHistory)\n\n\nclass BlobProcessing(ndb.Model):\n \"\"\"\n property to processes a blob upload steps\n \"\"\"\n\n blob_response_string = ndb.StringProperty(required=True)\n blob_response_type = ndb.StringProperty(required=True)\n\n def to_message(self):\n return BlobResponseMessage(\n blobResponseString=self.blob_response_string,\n blobResponseType=self.blob_response_type\n )\n\n @classmethod\n def get_upload_url(cls, message):\n blob_upload_url = blobstore.create_upload_url('/directors')\n\n resp = cls(\n blob_response_type=message.type,\n blob_response_string=blob_upload_url\n )\n\n return resp","repo_name":"samcarecho/anatelAPI","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"32365881850","text":"import numpy as np\nimport cv2\nfrom networktables import NetworkTables\n\nimport math\nfrom time import perf_counter\nimport collections\nimport argparse\n\n\n\n##########################################\n############# some utilities #############\n##########################################\n\ndef fadeHSV(image, mask):\n fade = cv2.multiply(image, (0.6,))\n cv2.subtract(image, fade, image, cv2.bitwise_not(mask))\n\ndef getKernel(size):\n return cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (size,size))\n\ndef getColorMask(input):\n # convert to HSV\n hsv = cv2.cvtColor(input, cv2.COLOR_BGR2HSV)\n cv2.medianBlur(hsv, 5, hsv)\n \n # threshold\n global minColor, maxColor\n halfW = hsv.shape[1] // 2\n maskL = cv2.inRange(hsv[:, :halfW], minColor[0], maxColor[0])\n maskR = cv2.inRange(hsv[:, halfW:], minColor[1], maxColor[1])\n mask = np.hstack((maskL, maskR))\n \n return mask, 
hsv\n\n\n\n#########################################\n############# main pipeline #############\n#########################################\n\nMAX_LINE_ANGLE = 15\n\nangleMap = np.zeros((1,1))\nangleMask = None\npivotChanged = True\ndef initAngleMap(shape):\n start = perf_counter()\n \n global angleMap, angleMask, pivotChanged\n height, width = shape\n \n # create angleMap\n dxs = np.tile(np.arange(width) - pivotLoc[0], (height,1))\n dys = np.tile(np.arange(height).reshape(height,1) - pivotLoc[1], (1,width))\n invertMask = dxs < 0\n dxs[invertMask] = np.negative(dxs[invertMask])\n dys[invertMask] = np.negative(dys[invertMask])\n angleMap = np.degrees(np.arctan2(dys, dxs))\n \n # create angleMask\n angleMask = cv2.inRange(angleMap, -MAX_LINE_ANGLE, +MAX_LINE_ANGLE)\n cx, cy, r = int(pivotLoc[0]), int(pivotLoc[1]), shape[1]//12\n cv2.rectangle(angleMask, (cx-r,cy-r), (cx+r,cy+r), 0, cv2.FILLED)\n \n pivotChanged = False\n \n end = perf_counter()\n if args.debug_timing: print(f\"initAngleMap took {int((end-start)*1000)} ms\")\n\nautoPivotMask = None\ndef autoDetectPivot():\n start = perf_counter()\n \n global pivotLoc, pivotChanged\n if autoPivotMask is not None:\n # filter contours by aspect ratio\n _, contours, _ = cv2.findContours(autoPivotMask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n def keep(contour):\n _, (w, h), _ = cv2.minAreaRect(contour)\n if w == 0 or h == 0: return False\n aspect = w / h\n if aspect < 1.0: aspect = 1/aspect\n return aspect > 4.5\n contours = [c for c in contours if keep(c)]\n if len(contours) == 0: return\n \n # set pivot to median of the filtered blobs\n xs, ys = np.hstack(c[:, 0, 0] for c in contours), np.hstack(c[:, 0, 1] for c in contours)\n if len(xs) > 0 and len(ys) > 0:\n pivotLoc = ((xs.max()+xs.min())/2, (ys.max()+ys.min())/2)\n pivotChanged = True\n \n end = perf_counter()\n if args.debug_timing: print(f\"autoDetectPivot took {int((end-start)*1000)} ms\")\n\ndef process(input):\n shape = input.shape[:2]\n height, width = shape\n \n global pivotLoc\n if pivotLoc is None:\n pivotLoc = (width//2, height//2)\n if pivotChanged or angleMap.shape != shape:\n initAngleMap(shape)\n \n if args.auto:\n autoSetColor(input)\n \n start = perf_counter()\n \n # get the color mask\n global curFrame\n mask, curFrame = getColorMask(input)\n \n # dilate the mask a bit\n cv2.dilate(mask, getKernel(4), mask)\n \n # compute distance transform\n dist = cv2.distanceTransform(cv2.copyMakeBorder(mask, 1,1,1,1, cv2.BORDER_CONSTANT, value=0), cv2.DIST_L2, 3)\n dist = dist[1:-1, 1:-1] # cut off the temporary border\n maxDist = float(dist.max())\n \n # threshold the distance transform\n mask2 = cv2.inRange(dist, maxDist*0.7, maxDist) if maxDist > 0 else np.zeros(shape, dtype=np.uint8)\n global autoPivotMask\n autoPivotMask = mask2.copy()\n \n # mask out bad angles\n cv2.bitwise_and(mask2, angleMask, mask2)\n \n if cv2.countNonZero(mask2) > 0:\n # compute average angle\n angle = cv2.mean(angleMap, mask2)[0]\n \n # histogram...\n #...\n \n updateAngle([-angle, -angle])\n else:\n angle = None\n updateAngle(None)\n \n \n \n end = perf_counter()\n if args.debug_timing: print(f\"process took {int((end-start)*1000)} ms\")\n \n ### draw debug info onto the input image and show it ###\n if args.debug_mask:\n cv2.imshow(\"mask\", getColorMask(input)[0])\n cv2.imshow(\"dist transform\", dist/(maxDist+0.01))\n if mask2 is not None: cv2.imshow(\"mask2\", autoPivotMask)\n \n output = input.copy()\n fadeHSV(output, mask)\n \n # blow up image for easier viewing\n if args.roi_scale != 1.0:\n 
output = cv2.resize(output, (0,0), fx=args.roi_scale, fy=args.roi_scale, interpolation=cv2.INTER_NEAREST)\n \n # draw pivot and detected angle\n cx, cy = int(pivotLoc[0]*args.roi_scale), int(pivotLoc[1]*args.roi_scale)\n if angle is not None:\n dx = int(2000*math.cos(math.radians(angle)))\n dy = int(2000*math.sin(math.radians(angle)))\n cv2.line(output, (cx-dx,cy-dy), (cx+dx,cy+dy), (0,255,0), 1, cv2.LINE_AA)\n cv2.circle(output, (cx,cy), 3, (255,255,0), cv2.FILLED)\n \n def drawText(text, x, y, color, size=0.4, fromM=0):\n textSz, _ = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, size, 1)\n y += int(textSz[1]*fromM)\n cv2.putText(output, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX,\n size, color, 1, cv2.LINE_AA)\n \n # FPS/debug text\n global dt, fps\n debugStr = \"\"\n if dt is not None:\n debugStr += f\"{int(dt*1000)} ms\"\n if fps is not None:\n debugStr += f\" ({int(fps)} FPS)\"\n drawText(debugStr, 10, int(height*args.roi_scale)-10, (0,255,0))\n if not NetworkTables.isConnected():\n drawText(\"Not Connected!\", 10, int(height*args.roi_scale)-45, (0,0,255))\n else:\n drawText(\"Connected!\", 10, int(height*args.roi_scale)-45, (0,255,0))\n drawText(\"tuning mode = auto\" if args.auto else \"tuning mode = manual\", 60, 29, (0, 255, 0))\n if selectingPivot:\n drawText(\"SELECTING PIVOT\", 60, 47, (0, 255, 0))\n drawText(f\"using device {args.device}\", 10, int(height*args.roi_scale)-30, (0,255,0))\n \n # visualize the detected angle and state\n angle = getAngle()\n if angle is not None:\n PRE_SZ = 50\n cv2.rectangle(output, (0, 0), (PRE_SZ, PRE_SZ), (255,255,255), cv2.FILLED)\n rads = math.radians(angle)\n offX = 200*math.cos(rads)\n offY = -200*math.sin(rads)\n cv2.line(output[0:PRE_SZ, 0:PRE_SZ], (int(PRE_SZ/2-offX),int(PRE_SZ/2-offY)), (int(PRE_SZ/2+offX),int(PRE_SZ/2+offY)), (0,0,0), lineType=cv2.LINE_AA)\n drawText(f\"angle = {int(angle*100)/100} deg (tip = {getTip()})\", 60, 3, (0,255,0), fromM=1)\n if errorMsg is not None:\n drawText(errorMsg, 5, 60, (0,0,255), fromM=1)\n \n cv2.line(output, (output.shape[1]//2, 0), (output.shape[1]//2, output.shape[0]), (255, 0, 0), 2)\n cv2.imshow(\"raw\", output)\n\n\n\n#########################################\n######### automatic calibration #########\n#########################################\n\nHIST_SMOOTH_RADIUS = 2\nHIST_SMOOTH_KERNEL = np.hamming(HIST_SMOOTH_RADIUS*2 + 1)\nHIST_SMOOTH_KERNEL /= HIST_SMOOTH_KERNEL.sum()\ndef getHistogram(hsv, channel, mask, normMax=255, reduce=3):\n maxV = [180,255,255][channel]\n hist = cv2.calcHist([hsv[:, :, channel]], [0], mask, [maxV//reduce], [0,maxV])\n \n # smooth histogram\n hist = np.r_[hist[-HIST_SMOOTH_RADIUS:], hist, hist[:HIST_SMOOTH_RADIUS]]\n hist = hist.reshape(len(hist))\n hist = np.convolve(hist, HIST_SMOOTH_KERNEL, mode=\"valid\")\n \n cv2.normalize(hist, hist, 0, normMax, cv2.NORM_MINMAX)\n return hist\n\ndef drawHistogram(hist, markers=[], bestMarker=-1):\n n = hist.shape[0]\n m = 180//n\n histImage = np.zeros((256, n*2, 3), np.uint8)\n hist = np.int32(np.around(hist))\n markers = markers + [bestMarker]\n for x,y in enumerate(hist):\n cv2.line(histImage, (x*2,256), (x*2,256-y), (x*m,255,255))\n cv2.line(histImage, (x*2+1,256), (x*2+1,256-y), (x*m+1,255,255))\n if x in markers:\n color = (0,0,255)\n if x == bestMarker: color = (60,255,255)\n cv2.line(histImage, (x*2,0), (x*2,256-y), color)\n histImage = cv2.cvtColor(histImage, cv2.COLOR_HSV2BGR)\n cv2.imshow(\"histogram\", histImage)\n\ndef getMaxima(hist):\n def at(i): return hist[(i) % len(hist)]\n return [i for i in range(1, 
len(hist)-1) if hist[i] > at(i-1) and hist[i] > at(i+1)]\ndef getClosestExtremum(extrema, target):\n return extrema[np.argmin([abs(i-target) for i in extrema])]\ndef getClosestMinLeft(hist, maxI):\n minI = maxI\n while minI > 1 and hist[minI] > hist[minI-1]:\n minI -= 1\n return minI\ndef getClosestMinRight(hist, maxI):\n minI = maxI\n while minI < len(hist)-2 and hist[minI] > hist[minI+1]:\n minI += 1\n return minI\n\nlastMax, lastMin0, lastMin1 = None, None, None\nMIN_HUE, TARGET_HUE, MAX_HUE = 126, 148, 164\ndef computeHueRange(hsv):\n start = perf_counter()\n \n # compute hue histogram\n mask = cv2.inRange(hsv, (0, 64, 64), (180, 255, 255))\n hist = getHistogram(hsv, 0, mask, reduce=2)\n \n # find hue range\n maxima = getMaxima(hist)\n bestMax = getClosestExtremum(maxima, TARGET_HUE / 2)\n min0 = getClosestMinLeft(hist, bestMax)\n min1 = getClosestMinRight(hist, bestMax)\n \n # update positions\n global lastMax, lastMin0, lastMin1\n def update(last, cur):\n cur = min(max(cur, MIN_HUE//2), MAX_HUE//2) # clamp to for-sure range\n if last is None: return cur\n if cur > last: return last+1\n if cur < last: return last-1\n return last\n lastMax = update(lastMax, bestMax)\n lastMin0 = update(lastMin0, min0)\n lastMin1 = update(lastMin1, min1)\n \n end = perf_counter()\n if args.debug_timing: print(f\" computeHueRange took {int((end-start)*1000)} ms\")\n \n if args.debug_histograms:\n # visualize the histogram\n drawHistogram(hist, [lastMin0, lastMin1], lastMax)\n #drawHistogram(hist, [MIN_HUE//2, MAX_HUE//2], TARGET_HUE//2)\n \n return lastMin0*2, lastMin1*2, lastMax*2\n\nSCALE = 8\nH_BINS = 256//SCALE\nhists = collections.deque(maxlen=15)\n\ndef svHist(hsv, mask, peakHue):\n start = perf_counter()\n \n # compute histogram\n hsv = hsv[mask > 128]\n weights = hsv[:,0] - peakHue\n weights = 1 / (0.004 + weights*weights)\n hist, _, _ = np.histogram2d(hsv[:,2], hsv[:,1], weights=weights, bins=H_BINS, range=[[0,255],[0,255]])\n hists.append(hist)\n \n # normalize\n hist = np.sum(hists, axis=0)\n cv2.GaussianBlur(hist, (3,3), 0.5, hist)\n hist = hist/hist.max() # normalize\n \n def tryThreshold(thresh):\n # create mask\n mask = cv2.inRange(hist, thresh, 999999)\n # cv2.erode(mask, cv2.getStructuringElement(cv2.MORPH_RECT, (2,2)), mask)\n \n # find best contour\n _, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n if len(contours) == 0:\n return None, 0, mask\n \n def contourScore(c):\n cMask = np.zeros((H_BINS,H_BINS), dtype=np.uint8)\n cv2.drawContours(cMask, [c], 0, 255, cv2.FILLED)\n integral = hist[cMask>128].sum()\n x,y,w,h = cv2.boundingRect(c)\n d = (x+w/2) + (y+h/2) # distance along diagonal\n return integral + d*0.5\n scores = [contourScore(c) for c in contours]\n bestIndex = np.argmax(scores)\n return contours[bestIndex], scores[bestIndex], mask\n \n contour, score, mask = tryThreshold(np.percentile(hist, 90))\n \n if args.debug_histograms:\n cv2.imshow(\"SV mask\", cv2.resize(mask, (512,512), interpolation=cv2.INTER_NEAREST))\n \n # get bounding rect\n x,y,w,h = cv2.boundingRect(contour)\n \n end = perf_counter()\n if args.debug_timing: print(f\" svHist took {int((end-start)*1000)} ms\")\n \n if args.debug_histograms:\n # display\n # hist = mask.astype(np.float)/255\n hist = cv2.resize((hist*255).astype(np.uint8), (512,512), interpolation=cv2.INTER_NEAREST)\n hist = cv2.cvtColor(hist, cv2.COLOR_GRAY2BGR)\n cv2.rectangle(hist, (x*SCALE*2,y*SCALE*2), ((x+w)*SCALE*2,(y+h)*SCALE*2), (0,255,0))\n cv2.imshow(\"SV histogram\", hist)\n \n return x*SCALE, 
(x+w)*SCALE, y*SCALE, (y+h)*SCALE\n\ndef autoSetColor(input):\n start = perf_counter()\n \n # convert to HSV\n hsv = cv2.cvtColor(input, cv2.COLOR_BGR2HSV)\n \n # get the hue range\n minH, maxH, peakHue = computeHueRange(hsv)\n \n # process/visualize S-V histogram\n mask = cv2.inRange(hsv, (minH, 40, 40), (maxH, 255, 255))\n # TODO: add hysteresis to this rectangle (like for the hue range)\n minS,maxS,minV,maxV = svHist(hsv, mask, peakHue)\n if args.debug_hue_mask:\n scaledMask = cv2.resize(mask, (0,0), fx=args.roi_scale, fy=args.roi_scale, interpolation=cv2.INTER_NEAREST)\n cv2.imshow(\"hue mask\", scaledMask)\n \n # set color range\n global minColor, maxColor\n minColor[0] = (minH, minS, minV)\n maxColor[0] = (maxH, 255, 255)\n minColor[1] = minColor[0]\n maxColor[1] = maxColor[0]\n \n \n end = perf_counter()\n if args.debug_timing: print(f\"autoSetColor took {int((end-start)*1000)} ms\")\n\n\n\n#########################################\n############ angle filtering ############\n#########################################\n\n# constants (TODO: tune these)\nMAX_SCALE_SPEED = 40.0 # maximum normal movement speed (degrees per second)\nSMOOTH_HISTORY = 1.0 # amount of history to consider for smoothing (seconds)\nSMOOTH_FIT_DEGREE = 2 # degree of polynomial fit for smoothing\nSTEADY_HISTORY = 1.0 # amount of history to consider for steadiness (seconds)\nSTEADY_THRESHOLD = 4.0 # angle variation considered \"steady\" (degrees)\nMAX_SKEW = 3.0 #2.2 # maximum skew between the top & bottom lines (degrees)\nTIPPED_THRESHOLD = 3.5 # angle at which the scale is \"tipped\" (degrees)\nUNTIPPED_THRESHOLD = 2.7 # angle at which the scale is no longer \"tipped\" (degrees)\n\n# ignore RankWarnings from np.polyfit\nimport warnings\nwarnings.simplefilter(\"ignore\", np.RankWarning)\n\ncurAngle = 0\nzeroPoint = 0\nzeroed = False\nlastUpdate = None\nsmoothHistory = collections.deque()\nsteadyHistory = collections.deque()\nwaitingForSteady = True\n\nerrorMsg = None\n\ndef isSteady():\n if len(steadyHistory) == 0: return False\n angles = [e[1] for e in steadyHistory]\n minA = min(angles)\n maxA = max(angles)\n return maxA - minA < STEADY_THRESHOLD\n\ndef updateAngle(lineAngles):\n global curAngle, zeroPoint, zeroed, lastUpdate, waitingForSteady, errorMsg\n errorMsg = None\n if not zeroed:\n errorMsg = \"NOT ZEROED YET\"\n \n # calculate dt\n now = perf_counter()\n if lastUpdate is None: lastUpdate = now\n dt = now - lastUpdate\n lastUpdate = now\n \n maxDelta = MAX_SCALE_SPEED*dt\n newAngle = None\n \n # history update functions\n def updateHistory(list, history, value):\n list.append((now, value))\n while now - list[0][0] > history:\n list.popleft()\n def updateSmoothHistory():\n updateHistory(smoothHistory, SMOOTH_HISTORY, curAngle)\n \n # use the the angles from the lines, if available\n if lineAngles is None:\n errorMsg = \"LINES FAILED\"\n else:\n a1, a2 = lineAngles\n if abs(a1 - a2) > MAX_SKEW:\n errorMsg = \"SKEWED\"\n else:\n newAngle = (a1 + a2) / 2\n \n # fall back to angles from centers of blobs, if necessary\n if newAngle is None:\n if errorMsg is None: errorMsg = \"NO GOOD DATA\"\n updateSmoothHistory()\n return\n \n delta = newAngle - curAngle\n \n # update history\n updateHistory(steadyHistory, STEADY_HISTORY, newAngle)\n \n # if it's moving too fast, stop updating until it's steady again\n if abs(delta) > maxDelta:\n waitingForSteady = True\n if waitingForSteady and not isSteady():\n errorMsg = \"STEADYING\"\n updateSmoothHistory()\n return\n waitingForSteady = False\n \n curAngle += 
min(max(delta, -maxDelta), +maxDelta)\n updateSmoothHistory()\n\ndef getRawAngle():\n return curAngle\n\ndef getAngle():\n if len(smoothHistory) == 0: return 0.0\n # do a polynomial fit on the history, putting more weight on recent data points\n weights = [x**0 for x in range(1, len(smoothHistory)+1)]\n fitFunc = np.poly1d(np.polyfit(*zip(*smoothHistory), SMOOTH_FIT_DEGREE, w=weights))\n lastTime = smoothHistory[-1][0]\n return fitFunc(lastTime) - zeroPoint\n\ncurTip = 0\ndef getTip():\n global curTip\n angle = getAngle()\n if abs(angle) < UNTIPPED_THRESHOLD: curTip = 0\n elif angle > +TIPPED_THRESHOLD: curTip = +1\n elif angle < -TIPPED_THRESHOLD: curTip = -1\n return curTip\n\ndef zeroAngle():\n global zeroPoint, zeroed\n angle = getRawAngle()\n if angle is not None:\n zeroPoint = angle\n zeroed = True\n\n\n\n##########################################\n######### command-line arguments #########\n##########################################\n\nparser = argparse.ArgumentParser(description=\"Program to track the scale arm using OpenCV. (by Quinn Tucker '18)\")\nparser.add_argument(\"-n\", \"--no-network\", action=\"store_true\", help=\"don't initialize/output to NetworkTables\")\noptGroup = parser.add_mutually_exclusive_group()\noptGroup.add_argument(\"-d\", \"--device\", type=int, default=0, metavar=\"ID\",\n help=\"device ID of the camera to use (default: %(default)s)\")\noptGroup.add_argument(\"-i\", \"--input-image\", metavar=\"FILE\", help=\"optional image to use instead of a live camera\")\noptGroup.add_argument(\"-v\", \"--input-video\", metavar=\"FILE\", help=\"optional video to use instead of a live camera\")\nparser.add_argument(\"-s\", \"--scale\", type=float, default=1.0, metavar=\"FACTOR\",\n help=\"amount to up/downsample each frame (optional)\")\nparser.add_argument(\"-a\", \"--auto\", action='store_true', default=False,\n help=\"set hue value automatically\")\nparser.add_argument(\"--hue-shift\", type=float, default=0.0, metavar=\"DEGREES\",\n help=\"amount to shift the hue of each frame (optional)\")\nparser.add_argument(\"--raw-scale\", type=float, default=1.0, metavar=\"FACTOR\",\n help=\"amount to scale the raw frame display by (default: %(default)s)\")\nparser.add_argument(\"--roi-scale\", type=float, default=2.0, metavar=\"FACTOR\",\n help=\"amount to scale the region-of-interest display by (default: %(default)s)\")\nparser.add_argument(\"--csv-output\", type=argparse.FileType(\"w\"), metavar=\"FILE\",\n help=\"optional file to write angle data to\")\nparser.add_argument(\"--ip\", type=str, default='10.2.54.2', metavar=\"IP\",\n help=\"ip to connect to.\")\ndebugGroup = parser.add_argument_group(title=\"debug flags\")\ndebugGroup.add_argument(\"--debug-histograms\", action=\"store_true\", help=\"show the hue and saturation-value histograms\")\ndebugGroup.add_argument(\"--debug-mask\", action=\"store_true\", help=\"show the thresholded color mask / debug masks\")\ndebugGroup.add_argument(\"--debug-hue-mask\", action=\"store_true\", help=\"show the hue mask\")\ndebugGroup.add_argument(\"--debug-timing\", action=\"store_true\", help=\"print how long various operations take\")\nargs = parser.parse_args()\n\n\n\n#########################################\n########## robot communication ##########\n#########################################\n\nif args.no_network:\n print(\"Skipping NetworkTables initialization\")\nelse:\n print(\"Using: \" + args.ip)\n NetworkTables.initialize(server=args.ip)\n smartDashboard = 
NetworkTables.getTable(\"SmartDashboard\")\n\n#########################################\n############### main code ###############\n#########################################\n\nminColor = [(0,0,0), (0,0,0)]\nmaxColor = [(0,0,0), (0,0,0)]\nH_PAD = 10\nS_PAD = 20\nV_PAD = 20\n\nroi = None\ngotROI = False\n\npivotLoc = None\nselectingPivot = False\n\nfrozen = False\n\nglobal width, height\ndef onMouse_raw(event, x, y, flags, param):\n global roi, gotROI, width, height\n x = int(x/args.raw_scale)\n y = int(y/args.raw_scale)\n x = min(max(x, 0), width-1)\n y = min(max(y, 0), height-1)\n if event == cv2.EVENT_LBUTTONDOWN:\n roi = (x, y, x, y)\n gotROI = False\n elif event == cv2.EVENT_LBUTTONUP:\n if roi[0] > roi[2]: roi = (roi[2], roi[1], roi[0], roi[3])\n if roi[1] > roi[3]: roi = (roi[0], roi[3], roi[2], roi[1])\n if roi[0] < roi[2] and roi[1] < roi[3]:\n gotROI = True\n \n if flags & cv2.EVENT_FLAG_LBUTTON:\n roi = roi[:2] + (x, y)\n\ndef onMouse(event, x, y, flags, param):\n global curFrame, minColor, maxColor, selectingPivot, pivotLoc, pivotChanged\n h, w = curFrame.shape[:2]\n x = int(x/args.roi_scale)\n y = int(y/args.roi_scale)\n \n if event == cv2.EVENT_LBUTTONUP or event == cv2.EVENT_RBUTTONUP:\n selectingPivot = False\n \n leftDown = flags & cv2.EVENT_FLAG_LBUTTON != 0\n rightDown = flags & cv2.EVENT_FLAG_RBUTTON != 0\n if not (leftDown or rightDown):\n return\n \n if selectingPivot:\n pivotLoc = (x, y)\n pivotChanged = True\n return\n \n ci = 0 if x < w/2 else 1\n \n if event == cv2.EVENT_RBUTTONDOWN:\n maxColor[ci] = (0,0,0)\n \n def addPixel(x, y):\n global curFrame, minColor, maxColor\n if x >= w or y >= h:\n return\n color = curFrame[y, x]\n newMinColor = (float(color[0])-H_PAD, float(color[1])-S_PAD, float(color[2])-V_PAD)\n newMaxColor = (float(color[0])+H_PAD, float(color[1])+S_PAD, float(color[2])+V_PAD)\n if maxColor[ci] == (0,0,0):\n minColor[ci] = newMinColor\n maxColor[ci] = newMaxColor\n elif leftDown or rightDown:\n minColor[ci] = tuple(map(min, minColor[ci], newMinColor))\n maxColor[ci] = tuple(map(max, maxColor[ci], newMaxColor))\n \n BRUSH_R = 4 # radius of brush\n for x2 in range(x-BRUSH_R, x+BRUSH_R+1):\n for y2 in range(y-BRUSH_R, y+BRUSH_R+1):\n addPixel(x2, y2)\n\ndef onKey(key):\n global minColor, maxColor, gotROI, exposure, selectingPivot\n print(f\"key: {key}\")\n if key == ord(\"c\") or key == ord(\"C\"):\n minColor[0], minColor[1] = (0,0,0), (0,0,0)\n maxColor[0], maxColor[1] = (0,0,0), (0,0,0)\n if key == ord(\"z\") or key == ord(\"Z\"):\n zeroAngle()\n if key == ord(\"a\") or key == ord(\"A\"):\n args.auto = True\n if key == ord(\"m\") or key == ord(\"M\"):\n args.auto = False\n if key == ord(\"r\") or key == ord(\"R\"):\n gotROI = False\n if key == ord(\"p\") or key == ord(\"P\"):\n selectingPivot = not selectingPivot\n if key == ord(\"o\") or key == ord(\"O\"):\n autoDetectPivot()\n if key == ord(\"d\") or key == ord(\"D\"):\n args.device += 1\n print(f\" switching device id to {args.device}\")\n global cap\n cap = initCapture()\n if key == ord(\"s\") or key == ord(\"S\"):\n cap.set(cv2.CAP_PROP_SETTINGS, 1)\n if key == ord(\"f\") or key == ord(\"F\"):\n global frozen\n frozen = not frozen\n if key == ord(\"-\") or key == ord(\"_\"):\n exposure -= 1\n cap.set(cv2.CAP_PROP_EXPOSURE, exposure)\n if key == ord(\"=\") or key == ord(\"+\"):\n exposure += 1\n cap.set(cv2.CAP_PROP_EXPOSURE, exposure)\n\n######### VideoCapture #########\ndef initCapture():\n if args.input_image is not None: return None\n print(\"Initializing VideoCapture...\")\n if 
args.input_video is not None:\n cap = cv2.VideoCapture(args.input_video)\n else:\n cap = cv2.VideoCapture(args.device)\n if not cap.isOpened():\n print(f\" failed to open camera device {args.device}\")\n print(f\" resetting device id to 0\")\n args.device = 0\n return initCapture() # retry with device 0 and use the capture it returns\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)\n global exposure\n exposure = cap.get(cv2.CAP_PROP_EXPOSURE)\n cap.set(cv2.CAP_PROP_EXPOSURE, exposure) # disable auto exposure & white balance\n # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)\n # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)\n print(\" done.\")\n return cap\nglobal cap\ncap = initCapture()\n\nif args.input_image is not None:\n inputImage = cv2.imread(args.input_image)\n inputImage = cv2.resize(inputImage, (0,0), fx=args.scale, fy=args.scale)\n\nglobal dt, fps\ndt = fps = None\nframeCount = 0\nheartbeat = 0\nlastSecond = perf_counter()\nfirst_call = True\n\nwhile True:\n # read the next frame and make sure it's valid\n if args.input_image is not None:\n frame = inputImage.copy()\n elif not frozen:\n ret, frame = cap.read()\n def isFrameOK():\n if not ret or frame is None:\n return False\n for i in [0,1,2]:\n if cv2.countNonZero(frame[:,:,i]) > 0:\n return True\n return False\n if not isFrameOK():\n print(\"Got a bad frame, reinitializing.\")\n cap.release()\n cap = initCapture() # reopen the VideoCapture\n continue\n if args.scale != 1.0:\n frame = cv2.resize(frame, (0,0), fx=args.scale, fy=args.scale)\n \n height, width = frame.shape[:2]\n \n if args.hue_shift != 0.0:\n start = perf_counter()\n \n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n shift = (args.hue_shift//2) % 180\n if shift < 0: shift += 180\n if 180+shift > 255:\n tmp = frame[:, :, 0].astype(np.uint16) + np.uint16(shift)\n tmp[tmp > 180] -= 180\n frame[:, :, 0] = tmp\n else:\n frame[:, :, 0] += np.uint8(shift)\n frame = cv2.cvtColor(frame, cv2.COLOR_HSV2BGR)\n \n end = perf_counter()\n if args.debug_timing: print(f\"hue shift took {int((end-start)*1000)} ms\")\n \n # show the raw frame (with ROI rect)\n frameDisp = frame.copy()\n if args.raw_scale != 1.0:\n frameDisp = cv2.resize(frameDisp, (0,0), fx=args.raw_scale, fy=args.raw_scale)\n if roi is not None:\n sROI = tuple(int(v*args.raw_scale) for v in roi)\n cv2.rectangle(frameDisp, sROI[:2], sROI[2:], (0, 0, 255), 2)\n if not NetworkTables.isConnected():\n cv2.putText(frameDisp, \"NetworkTables is not connected\", (10, 20),\n cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0,0,255), 1, cv2.LINE_AA)\n else:\n cv2.putText(frameDisp, \"NetworkTables is connected\", (10, 20),\n cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0,255,0), 1, cv2.LINE_AA)\n\n if not gotROI:\n cv2.imshow(\"raw\", frameDisp)\n cv2.setMouseCallback(\"raw\", onMouse_raw)\n if first_call:\n cv2.moveWindow(\"raw\", 0, 0)\n first_call = False\n \n if gotROI:\n start = perf_counter()\n frameROI = frame[roi[1]:roi[3], roi[0]:roi[2]]\n process(frameROI)\n end = perf_counter()\n dt = end - start\n \n frameCount += 1\n if perf_counter() - lastSecond > 1.0:\n lastSecond += 1.0\n fps = frameCount\n frameCount = 0\n \n cv2.setMouseCallback(\"raw\", onMouse)\n \n # if args.csv_output is not None and dbg1 is not None:\n # args.csv_output.write(f\"{start}, {dbg1}, {dbg2}, {getTip()}\\n\")\n else:\n frameROI = None\n \n if not args.no_network:\n smartDashboard.putNumber(\"scaleAngle\", getAngle())\n smartDashboard.putNumber(\"scaleTip\", getTip())\n smartDashboard.putBoolean(\"scaleError\", errorMsg is not None)\n smartDashboard.putNumber(\"scaleHeartbeat\", heartbeat)\n heartbeat += 
1\n \n key = cv2.waitKey(1) & 0xFF\n if key == 27:\n break\n elif key != 0xFF:\n onKey(key)\n\n# cleanup VideoCapture, windows, and CSV output\nif args.input_image is None:\n cap.release()\ncv2.destroyAllWindows()\nif args.csv_output is not None:\n args.csv_output.close()\n","repo_name":"Team254/FRC-2018-Public","sub_path":"dash/CheesyVision2.py","file_name":"CheesyVision2.py","file_ext":"py","file_size_in_byte":28043,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"4"} +{"seq_id":"12799806332","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def hasCycle(self, head: ListNode) -> bool:\n vis = {}\n while head:\n if head in vis:\n return True\n \n vis[head] = True\n head = head.next\n \n return False\n ","repo_name":"Waqar-107/LeetCode","sub_path":"Algorithms/Linked List/141. Linked List Cycle.py","file_name":"141. Linked List Cycle.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"4"} +{"seq_id":"40262874594","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the 'interQuartile' function below.\n# The function accepts following parameters:\n# 1. INTEGER_ARRAY values\n# 2. INTEGER_ARRAY freqs\n\ndef median(array):\n n=len(array)\n if n%2!=0: return array[(n-1)//2]\n else: return (array[(n-1)//2]+array[n//2])/2\n\ndef interQuartile(values, freqs):\n n=sum(freqs)\n newarr=[]\n for i in range(len(freqs)):\n for f in range(freqs[i]): newarr.append(values[i])\n newarr.sort() \n print(\"{:.1f}\".format(median(newarr[(n+1)//2:]) - median(newarr[:(n//2)]))) \n \n \n\nif __name__ == '__main__':\n n = int(input().strip())\n val = list(map(int, input().rstrip().split()))\n freq = list(map(int, input().rstrip().split()))\n interQuartile(val, freq)\n","repo_name":"sharvi24/10dayStatsChallenge","sub_path":"Day1: Interquartile Range/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"36493321010","text":"from Classes.router import Router\n\nclass RouterGrid():\n UP = 0\n RIGHT = 1\n DOWN = 2\n LEFT = 3\n DIRS = [0,1,2,3]\n\n def __init__(self, DIM1, DIM2, MSG_LEN, SAMPLE_THRESH, MSG_FREQ, PATH_LEN):\n self.DIM1 = DIM1\n self.DIM2 = DIM2 \n\n # table of routers\n self.routers = []\n for x in range(0, self.DIM1):\n cur_row = []\n for y in range(0, self.DIM2):\n # creates router with the address corresponding to network dimensions\n cur_row.append(Router([x, y], DIM1, DIM2, MSG_FREQ, MSG_LEN, PATH_LEN, SAMPLE_THRESH))\n self.routers.append(cur_row)\n \n def __len__(self):\n return self.DIM1\n\n def __getitem__(self, key):\n return self.routers[key]\n\n def get_port_in_dir(self, i, j, dir):\n if dir == self.UP:\n return self.routers[(i-1) % self.DIM1][j].ports[self.DOWN]\n elif dir == self.RIGHT:\n return self.routers[i][(j+1) % self.DIM2].ports[self.LEFT]\n elif dir == self.DOWN:\n return self.routers[(i+1) % self.DIM1][j].ports[self.UP]\n elif dir == self.LEFT:\n return self.routers[i][(j-1) % self.DIM2].ports[self.RIGHT]\n\n def get_router_index_in_dir(self, i, j, dir):\n if dir == self.UP:\n return (i-1) % self.DIM1, j\n elif dir == self.RIGHT:\n return i, (j+1) % self.DIM2\n elif dir == self.DOWN:\n return (i+1) % self.DIM1, j\n elif dir == self.LEFT:\n return i, (j-1) % self.DIM2\n \n\n def 
move_inter_router(self, i, j, chosen_ports = None):\n moved = []\n if chosen_ports is None:\n chosen_ports = self.DIRS\n for p in chosen_ports:\n if self.routers[i][j].ports[p].obuffer_is_ready():\n dest_port = self.get_port_in_dir(i, j, p)\n if dest_port.Ibuffer is None:\n dest_port.putI(self.routers[i][j].ports[p].getO())\n moved.append(p)\n return moved\n\n def follow_intra_movements(self, i, j, moved, time):\n '''Propagates an intra-router movement, i.e.\n processes all consequential movement'''\n for dir in moved:\n new_i, new_j = self.get_router_index_in_dir(i, j, dir)\n dir_moved = self.move_inter_router(new_i, new_j, chosen_ports=[(dir+2)%4])\n if dir_moved:\n self.follow_inter_movements(new_i, new_j, dir_moved, time)\n\n def follow_inter_movements(self, i, j, moved, time):\n '''Propagates an inter-router movement, i.e.\n processes all consequential movement'''\n moved = self.routers[i][j].move(time, chosen_ports = moved)\n if moved:\n self.follow_intra_movements(i, j, moved, time)\n","repo_name":"horenbergerb/VirtualCuthroughRouting","sub_path":"Classes/routergrid.py","file_name":"routergrid.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"19352366631","text":"\"\"\"\nEnd to End test cases\n\"\"\"\nimport os\nfrom typing import Any, List\nfrom unittest import TestCase\n\nfrom confluent_kafka import DeserializingConsumer\nfrom confluent_kafka.schema_registry.protobuf import ProtobufDeserializer\nfrom confluent_kafka.serialization import StringDeserializer\n\nfrom jobs.collectdata.collect_data import (\n collect_movielens_100k_data,\n collect_popular_tmdb_movie_data,\n collect_popular_tmdb_series_data,\n)\nfrom jobs.shared.metadata_pb2 import ItemMetadata\nfrom jobs.shared.user_interaction_pb2 import UserInteraction\nfrom jobs.shared.item_score_pb2 import ItemScores\n\n\nKAFKA_CONSUMER = Any\nMESSAGE = Any\n\n\ndef get_messages(consumer: KAFKA_CONSUMER) -> List[MESSAGE]:\n \"\"\"\n Polls a consumer and gets all of the new messages.\n\n :param consumer: Kafka consumer\n :return: LIst of collected messages\n \"\"\"\n no_messages = 0\n while no_messages < 12:\n msg = consumer.poll(timeout=5)\n if msg is not None:\n yield msg\n else:\n no_messages += 1\n\n\nclass EndToEndTestCase(TestCase):\n \"\"\"\n End to End test case for the following collecting data sources:\n * MovieLens 100k\n * TMDB Popular Movies\n * TMDB Popular Series\n \"\"\"\n\n @classmethod\n def setUpClass(cls) -> None:\n \"\"\"\n Creates kafka consumers that are used to validate the output\n :return:\n \"\"\"\n cls.api_key = os.environ[\"TMDB_API_KEY\"]\n cls.kafka_brokers = os.environ.get(\"KAFKA_BROKERS\", \"kafka:9092\")\n cls.schema_registry = os.environ.get(\"SCHEMA_REGISTRY\", \"http://schema-registry:8082\")\n cls.user_interaction_consumer = DeserializingConsumer(\n {\n \"group.id\": \"e2e-tests\",\n \"default.topic.config\": {\"auto.offset.reset\": \"smallest\"},\n \"bootstrap.servers\": cls.kafka_brokers,\n \"key.deserializer\": StringDeserializer(),\n \"value.deserializer\": ProtobufDeserializer(UserInteraction, conf={\"use.deprecated.format\": False}),\n }\n )\n cls.item_metadata_consumer = DeserializingConsumer(\n {\n \"group.id\": \"e2e-tests\",\n \"default.topic.config\": {\"auto.offset.reset\": \"smallest\"},\n \"bootstrap.servers\": cls.kafka_brokers,\n \"key.deserializer\": StringDeserializer(),\n \"value.deserializer\": ProtobufDeserializer(ItemMetadata, conf={\"use.deprecated.format\": False}),\n 
}\n )\n cls.item_scores_consumer = DeserializingConsumer(\n {\n \"group.id\": \"e2e-tests\",\n \"default.topic.config\": {\"auto.offset.reset\": \"smallest\"},\n \"bootstrap.servers\": cls.kafka_brokers,\n \"key.deserializer\": StringDeserializer(),\n \"value.deserializer\": StringDeserializer(),\n }\n )\n\n def test_collect_tmdb_popular_movies(self):\n \"\"\"\n Tests the tmdb popular movies collection job.\n\n :return:\n \"\"\"\n collect_popular_tmdb_movie_data(self.api_key, self.kafka_brokers, self.schema_registry)\n self.item_metadata_consumer.subscribe([\"metadata\"])\n self.item_scores_consumer.subscribe([\"popularity\"])\n\n self.assertEqual(20, len(list(get_messages(self.item_metadata_consumer))))\n self.assertEqual(1, len(list(get_messages(self.item_scores_consumer))))\n\n def test_collect_tmdb_popular_series(self):\n \"\"\"\n Tests the tmdb popular series collection job.\n\n :return:\n \"\"\"\n collect_popular_tmdb_series_data(self.api_key, self.kafka_brokers, self.schema_registry)\n self.item_metadata_consumer.subscribe([\"metadata\"])\n self.item_scores_consumer.subscribe([\"popularity\"])\n\n self.assertEqual(20, len(list(get_messages(self.item_metadata_consumer))))\n self.assertEqual(1, len(list(get_messages(self.item_scores_consumer))))\n\n def test_collect_movielens_100k(self):\n \"\"\"\n Tests the movielens 100k collection job.\n\n :return:\n \"\"\"\n collect_movielens_100k_data(self.api_key, self.kafka_brokers, self.schema_registry)\n self.user_interaction_consumer.subscribe([\"user-interaction\"])\n self.item_metadata_consumer.subscribe([\"metadata\"])\n\n user_interaction_messages = list(get_messages(self.user_interaction_consumer))\n item_metadata_messages = list(get_messages(self.item_metadata_consumer))\n self.assertGreater(len(item_metadata_messages), 8_000)\n self.assertGreater(len(user_interaction_messages), 50_000)\n self.assertEqual(\n {item_metadata_message.key() for item_metadata_message in item_metadata_messages},\n {user_interaction_message.value().item_id for user_interaction_message in user_interaction_messages},\n )\n","repo_name":"kylelmiller/movieaugur","sub_path":"offline-jobs/tests/e2e/dags/jobs/collectdata/test_collect_data.py","file_name":"test_collect_data.py","file_ext":"py","file_size_in_byte":4852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"6975859639","text":"# Objects in Python\n\npersona = {\n 'nombre':'Daniel',\n 'apellidos':'Iriarte',\n 'edad':25,\n 'lenguajes':['python','Javascript']\n}\n\nprint(persona)\n\npersona['nombre'] = 'Daniel Jose'\npersona['edad'] = 30\nprint(persona)\npersona['lenguajes'].append('Java')\ndel persona['apellidos']\nprint(persona)\npersona['lenguajes'].append('Python')\n\nprint(persona.items())\nprint(persona.keys())\n\n# loops in Python\ncount = 0\n\nwhile count < 5:\n print('Execution #:' + str(count+1))\n count += 1\n\nmy_list = [1,7,3,4,5,6]\n\nfor i in my_list:\n print(i)\n\n\nname = 'Mario Salvador'\nname = name.replace('Mario','Juan')\nprint(name)\nprint((8 / 2) + 4 * 8)","repo_name":"Diriarte01/Projects","sub_path":"cursos_platzi/Python-basico/objeto.py","file_name":"objeto.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"21608854302","text":"from turtle import Turtle\n\n\nclass Scoreboard(Turtle):\n\n def __init__(self):\n super().__init__()\n self.color(\"white\")\n self.penup()\n 
self.hideturtle()\n self.l_score = 0\n self.r_score = 0\n self.display_score()\n\n def display_score(self):\n self.clear()\n self.setposition(-100, 250)\n self.write(f\"{self.l_score}\", font=(\"Courier\", 40, \"bold\"), align=\"center\")\n self.setposition(100, 250)\n self.write(f\"{self.r_score}\", font=(\"Courier\", 40, \"bold\"), align=\"center\")\n","repo_name":"securemedjay/ping_pong","sub_path":"scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"1481703955","text":"import abc\nfrom typing import List, Optional, Sequence, Tuple, Union\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nfrom monai.transforms import Compose, EnsureChannelFirstd, Invertd, ToTensord\nfrom monai.transforms.transform import Transform\nimport pandas as pd\nimport torch\nimport torch.utils.data as data\n\nfrom .readers import (\n MetaDatasetOnlyNiftiMetaReader,\n MetaDatasetMultipleMasksNiftiReader,\n MetaDatasetDicomReader,\n)\nfrom .type_definition import (\n MetaIntermediateItem,\n MetaReader,\n MetaDatasetReaderAbstract,\n MetaFinalItem,\n)\n\n\nclass MetaDatasetAbstract(abc.ABC):\n def __init__(self) -> None:\n super().__init__()\n \n @abc.abstractmethod\n def __len__(self) -> int:\n raise NotImplementedError()\n \n @abc.abstractmethod\n def __getitem__(self, idx: int) -> Union[MetaFinalItem, List[MetaFinalItem]]:\n raise NotImplementedError()\n \n @abc.abstractmethod\n def get_patient_id(self, idx: int) -> str:\n raise NotImplementedError()\n \n @abc.abstractmethod\n def patient_has_meta(self, idx: int) -> bool:\n raise NotImplementedError()\n \n @abc.abstractmethod\n def set_transform(self, transform: Transform) -> None:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def get_transform(self) -> Transform:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def get_inverse_transform(self, transform: Optional[Transform] = None) -> Transform:\n raise NotImplementedError()\n\n\nclass MetaDataset(MetaDatasetAbstract, data.Dataset):\n def __init__(self, data_dir: str, reader: MetaReader,\n transform: Optional[Transform] = None, \n device: torch.device = None,\n dtype = torch.float32) -> None:\n self.device = device\n self._reader = self.__get_reader(reader, data_dir)\n self.transform = transform\n self._endload_transform_with_label = Compose([\n EnsureChannelFirstd(keys=[\"image\", \"label\"]),\n ToTensord(keys=[\"image\", \"label\"]),\n ])\n self._endload_transform_without_label = Compose([\n EnsureChannelFirstd(keys=[\"image\"]),\n ToTensord(keys=[\"image\"]),\n ])\n self.dtype = dtype\n\n def __len__(self) -> int:\n return len(self._reader)\n\n def __getitem__(self, idx: Union[int, str]) -> Union[MetaFinalItem, List[MetaFinalItem]]:\n return self.get_item(idx, transform=self.transform)\n\n def get_item(self, idx: Union[int, str], transform: Optional[Transform] = None) -> Union[MetaFinalItem, List[MetaFinalItem]]:\n meta_item = self.get_item_without_transform(idx)\n\n if meta_item.dict_object is None:\n return meta_item\n\n return self.apply_transform(meta_item, transform=transform)\n \n def apply_transform(self, meta_item: MetaIntermediateItem, transform: Optional[Transform] = None) -> Union[MetaFinalItem, List[MetaFinalItem]]:\n dict_object, patient_id, has_meta = meta_item\n\n if self.transform is not None or transform is not None:\n transform = transform or self.transform\n dict_object = transform(dict_object)\n \n return 
self.apply_end_transformation(MetaIntermediateItem(dict_object, patient_id, has_meta))\n \n def apply_end_transformation(self, meta_item: MetaIntermediateItem) -> Union[MetaFinalItem, List[MetaFinalItem]]:\n dict_object, patient_id, has_meta = meta_item\n\n if isinstance(dict_object, list):\n output = [None]*len(dict_object)\n for ii in range(len(dict_object)):\n output[ii] = MetaFinalItem(\n dict_object[ii][\"image\"],\n dict_object[ii][\"label\"],\n patient_id,\n has_meta\n )\n return output\n \n return MetaFinalItem(\n dict_object[\"image\"],\n dict_object[\"label\"],\n patient_id,\n has_meta\n )\n \n def get_item_without_transform(self, idx: Union[int, str]) -> MetaIntermediateItem:\n if isinstance(idx, int):\n patient_id = self.get_patient_id(idx)\n else:\n patient_id = idx\n \n try:\n dict_object = self._reader.load(patient_id)\n except Exception:\n return MetaIntermediateItem(None, patient_id, False)\n\n # finish to load imgs\n if isinstance(dict_object[\"label\"], str):\n dict_object = self._endload_transform_without_label(dict_object)\n dict_object[\"label\"] = torch.zeros_like(dict_object[\"image\"])\n else:\n dict_object = self._endload_transform_with_label(dict_object)\n\n dict_object[\"image\"] = dict_object[\"image\"].to(dtype=self.dtype)\n dict_object[\"label\"] = dict_object[\"label\"].to(dtype=self.dtype)\n\n # specific transforms\n if len(dict_object[\"image\"].shape) == 3:\n dict_object[\"image\"] = dict_object[\"image\"][None, ...]\n dict_object[\"label\"] = dict_object[\"label\"][None, ...]\n dict_object[\"image\"] = dict_object[\"image\"].permute(0, 2, 1, 3).flip(3)\n dict_object[\"label\"] = dict_object[\"label\"].permute(0, 2, 1, 3).flip(3)\n\n if dict_object[\"label\"].max() > 1.0:\n dict_object[\"label\"] /= dict_object[\"label\"].max()\n\n return MetaIntermediateItem(\n dict_object,\n patient_id,\n self._reader.patient_has_meta_from_id(patient_id)\n )\n \n def __get_reader(self, reader: MetaReader, data_dir: str) -> MetaDatasetReaderAbstract:\n if reader == MetaReader.DICOM:\n return MetaDatasetDicomReader(data_dir)\n elif reader == MetaReader.NIFTI:\n return MetaDatasetOnlyNiftiMetaReader(data_dir)\n elif reader == MetaReader.NIFTI_MULTIPLE_MASKS:\n return MetaDatasetMultipleMasksNiftiReader(data_dir)\n \n raise ValueError(f\"the '{reader}' reader doesn't exist\")\n\n def set_transform(self, transform: Transform) -> None:\n self.transform = transform\n\n def get_transform(self) -> Transform:\n return self.transform\n\n def get_inverse_transform(self, transform: Optional[Transform] = None) -> Transform:\n return Invertd(\n keys=[\"image\", \"label\", \"pred\"],\n transform=transform or self.transform,\n orig_keys=[\"image\", \"label\", \"pred\"],\n meta_keys=[\"image_meta_dict\", \"label_meta_dict\", \"pred_meta_dict\"],\n orig_meta_keys=[\"image_meta_dict\", \"label_meta_dict\", \"pred_meta_dict\"],\n meta_key_postfix=\"meta_dict\",\n nearest_interp=False,\n to_tensor=True,\n )\n\n def get_patient_id(self, idx: int) -> str:\n return self._reader.get_patient_id(idx)\n \n def patient_has_meta(self, idx: int) -> bool:\n return self._reader.patient_has_meta(idx)\n \n def get_patient_ids(self) -> Tuple[pd.Series, pd.Series]:\n return self._reader.get_patient_ids()\n\n\nclass MetaSubset(data.Subset, MetaDatasetAbstract):\n def __init__(self, dataset: MetaDataset, indices: Sequence[str], transform: Optional[Transform] = None):\n super().__init__(dataset, indices)\n self.transform = transform\n\n def __getitem__(self, idx: int) -> Union[MetaFinalItem, 
List[MetaFinalItem]]:\n return self.dataset.get_item(self.indices[idx], self.transform)\n\n def get_patient_id(self, idx: int) -> str:\n return self.dataset.get_patient_id(idx)\n\n def patient_has_meta(self, idx: int) -> bool:\n return self.dataset.patient_has_meta(idx)\n\n def set_transform(self, transform: Transform) -> None:\n self.transform = transform\n\n def get_transform(self) -> Transform:\n return self.transform\n\n def get_inverse_transform(self) -> Transform:\n return self.dataset.get_inverse_transform(self.transform)\n \n @classmethod\n def from_subset(cls, subset: data.Subset) -> \"MetaSubset\":\n return cls(subset.dataset, subset.indices)\n \n def get_patient_ids(self) -> Tuple[pd.Series, pd.Series]:\n series = self.dataset.get_patient_ids()\n\n indices = []\n for patient_id in self.indices:\n indices.append(series[0].index[series[0] == patient_id][0])\n\n patient_ids = series[0][indices]\n has_meta = series[1][indices]\n\n return (patient_ids, has_meta)\n","repo_name":"VendenIX/BrainMetaSegmentatorUI-Back","sub_path":"App/meta/data/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":8392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"70490459959","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 2 13:33:41 2020\n\n@author: Ben Boys\n\n\nTest matrix vector multiplication kernels\n\"\"\"\nimport pyopencl as cl\nimport numpy as np\nimport sys\nimport pathlib\n\nsys.path.insert(1, pathlib.Path(__file__).parent.absolute() / 'peridynamics/kernels')\nimport time\n\ndef output_device_info(device_id):\n sys.stdout.write(\"Device is \")\n sys.stdout.write(device_id.name)\n if device_id.type == cl.device_type.GPU:\n sys.stdout.write(\"GPU from \")\n elif device_id.type == cl.device_type.CPU:\n sys.stdout.write(\"CPU from \")\n else:\n sys.stdout.write(\"non CPU of GPU processor from \")\n sys.stdout.write(device_id.vendor)\n sys.stdout.write(\" with a max of \")\n sys.stdout.write(str(device_id.max_compute_units))\n sys.stdout.write(\" compute units, \\n\")\n sys.stdout.write(\"a max of \")\n sys.stdout.write(str(device_id.max_work_group_size))\n sys.stdout.write(\" work-items per work-group, \\n\")\n sys.stdout.write(\"a max work item dimensions of \")\n sys.stdout.write(str(device_id.max_work_item_dimensions))\n sys.stdout.write(\", \\na max work item sizes of \")\n sys.stdout.write(str(device_id.max_work_item_sizes))\n sys.stdout.write(\",\\nand device local memory size is \")\n sys.stdout.write(str(device_id.local_mem_size))\n sys.stdout.write(\" bytes. 
\\n\")\n sys.stdout.flush()\n\nnp.random.seed(69)\n \nx = np.random.normal(0, 1, (513))\nA = np.random.normal(0, 1, (513, 513))\n\nprint(A)\nprint(x.shape)\nprint(A.shape)\n\ny = np.dot(A, x)\n\nh_m = np.intc(\n 1<<(len(x)-1).bit_length()\n )\nh_n = np.intc(len(x))\n\n\nshape = np.shape(A)\npadded_A = np.zeros((h_m, h_n))\npadded_A[:shape[0],:shape[1]] = A\n\nprint(y.shape)\n\n # Initializing OpenCL\ncontext = cl.create_some_context()\nqueue = cl.CommandQueue(context, properties=cl.command_queue_properties.PROFILING_ENABLE) \n\n# Print out device info\noutput_device_info(context.devices[0])\n\n# Build the OpenCL program from file\nkernelsource = open(pathlib.Path(__file__).parent.absolute() / \"kernels/mvmul.cl\").read()\n\n\n# Build the programs\n#program = cl.Program(context, kernelsource).build([options_string])\n\nprogram = cl.Program(context, kernelsource).build()\n\ncl_kernel_matrix_vector_mul = program.gemv1\n\n# Set initial values in host memory\n# horizons and horizons lengths\nh_x = np.ascontiguousarray(x, dtype=np.float64)\nh_A = np.ascontiguousarray(np.transpose(padded_A), dtype=np.float64)\nh_y = np.empty((h_n), dtype=np.float64)\n\nprint(h_n)\nprint(h_m)\n\n # Read only\nd_x = cl.Buffer(context,\n cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,\n hostbuf=h_x)\nd_A = cl.Buffer(context,\n cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,\n hostbuf=h_A)\n\n# Write only\nd_y= cl.Buffer(context, cl.mem_flags.WRITE_ONLY, h_y.nbytes)\n\ncl_kernel_matrix_vector_mul.set_scalar_arg_dtypes(\n [None, None, None, None, None])\n\nstart = cl.enqueue_marker(queue)\n# Calc bond forces\ncl_kernel_matrix_vector_mul(queue, (h_m,), (128,),\n d_A, d_x, d_y, h_m, h_n)\ncl.enqueue_copy(queue, h_y, d_y)\nfinish = cl.enqueue_marker(queue)\n\nprint('Time taken for kernel was', (start.profile.end-finish.profile.start)*1e-9)\n\n\nzeros = np.subtract(h_y, y)\n\nprint(np.max(zeros))\n\n\n","repo_name":"bb515/probabilistic-peridynamics-project","sub_path":"peridynamics/mvect.py","file_name":"mvect.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"24906390735","text":"#\n# please refer to PPT file\n# for exercise\n#\n# Integración numérica\n#\n# Regla del punto medio\n# Regla del trapecio\n# Regla de Simpson\n\nfrom math import sin, sqrt\n\ndef f(x):\n return 2*(sin(sqrt(x)))-x\n\na = 0\nb = 1.9724\n\nm = (a+b)/2\nr2 = f(m)*(b-a)\nprint('Regla del punto medio: ',r2)\n\nr3 = ((b-a)/2)*(f(a)+f(b))\nprint('Regla del trapecio: ',r3)\n\nr4 = ((b-a)/6)*(f(a)+4*f(m)+f(b))\nprint('Regla de Simpson: ',r4)\n\n\n\n","repo_name":"montsegv/NumericalComp","sub_path":"06-Integration/2do/2do1.py","file_name":"2do1.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"28325232942","text":"# coding=utf-8\nimport pymysql\n\n\ndef info_sql():\n conn = pymysql.connect(host='139.155.33.105', port=2707, user='root', password='Liyitong97!', db='contract',\n charset='utf8')\n cursor = conn.cursor()\n sql1 = \"select * FROM info\"\n sql2 = \"SHOW full COLUMNS FROM info\"\n sql3 = \"select * FROM colname\"\n sql4 = \"SHOW full COLUMNS FROM colname\"\n cursor.execute(sql1)\n conn.commit()\n result1 = cursor.fetchall()\n cursor.execute(sql2)\n conn.commit()\n result2 = cursor.fetchall()\n cursor.execute(sql3)\n conn.commit()\n result3 = cursor.fetchall()\n cursor.execute(sql4)\n conn.commit()\n result4 = cursor.fetchall()\n 
all_list = []\n all_list2 = []\n result2list = []\n result4list = []\n for item in result2:\n result2list.append(item[0])\n for i in range(len(result1)):\n tup_list = list(result1[i])\n all_dict = dict(zip(result2list, tup_list))\n all_list.append(all_dict)\n for item in result4:\n result4list.append(item[0])\n for i in range(len(result3)):\n tup_list = list(result3[i])\n all_dict = dict(zip(result4list, tup_list))\n all_list2.append(all_dict)\n cursor.close()\n conn.close()\n title = []\n title2 = []\n for i in range(len(all_list2)):\n title.append(all_list2[i]['colname'])\n title2.append(all_list2[i]['colnamech'])\n return all_list, title, title2\n\ninfo_sql()\n\n","repo_name":"comtumacy/contract_flask","sub_path":"main/get/info/info_sql.py","file_name":"info_sql.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"28999233875","text":"from turtle import Turtle, Screen\r\nfrom random import choice\r\n\r\ntim = Turtle()\r\ntim.speed(\"fastest\")\r\ntim.color(\"blue\")\r\ntim.pensize(8)\r\n\r\n# Spirograph\r\n\r\nfor i in range(120):\r\n tim.color(choice(colors))\r\n tim.circle(100)\r\n tim.left(3)\r\n\r\nscreen.resetscreen()\r\n\r\n# Hirst Dot Painting\r\n\r\ntim.penup()\r\n\r\nfor i in range (10):\r\n for j in range(10):\r\n if j == 0:\r\n tim.goto(-235, -235+50*i) # 235 = (50*9+20)/2\r\n else:\r\n tim.setheading(0) # set to the right\r\n tim.forward(50)\r\n tim.dot(20, choice(colors)) #\r\n\r\nscreen.exitonclick()\r\n\r\n\r\n\r\n","repo_name":"mivCalik/python_projects","sub_path":"day-18-turtle-gui/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"18345655653","text":"import csv\nimport random\nimport time\nimport itertools\nimport math\nimport msvcrt as m\nimport numpy as np \nimport matplotlib.pyplot as plt\n\nmin_nr_sample=50 #minimum % number of items in a sample\nmax_nr_sample=100 #maximum % number of items in a sample\nnrtree=10 #number of trees used in the random forest\nnrsplit=10 #number of splits for comparable variables\nonlyhour=True\ndef read(file):\n '''\n read the data and return it as a list\n Parameters:\n filename\n '''\n with open(file) as f:\n reader = csv.reader(f)\n data = [r for r in reader]\n return data[0],data[1:]\ndef makefloat(data):\n '''\n change str to float\n '''\n for i in range(0,len(data)):\n for j in range(0,len(data[i])):\n if j == 0:\n astr=data[i][j]\n astr=astr.split(' ')\n astr[0]=astr[0].split('-')\n astr[1]=astr[1].split(':')\n if not onlyhour:\n astr=astr[0]+astr[1]\n else:\n astr=astr[1]\n sum=0\n for k in astr:\n sum*=100\n sum+=float(k)\n data[i][j]=sum\n else:\n data[i][j]=float(data[i][j])\n\n\n return None\ndef dropcolumn(data,index):\n '''\n Drops a specific column\n '''\n for row in data:\n row.pop(index)\ndef sample(data,min_nr_sample=50,max_nr_sample=100):\n '''\n random subsample of the data (random.sample draws without replacement)\n ''' \n return random.sample(data, random.randint(int(len(data)/100*min_nr_sample), int(len(data)/100*max_nr_sample))) \ndef split(data, func):\n '''\n splits the tree into two parts by the function\n '''\n left,right=[],[]\n for i in data:\n if func(i):\n 
right.append(i)\n else:\n left.append(i)\n return left,right\ndef error(data):\n avg = 0.0\n for i in data:\n avg+=i[-1]\n avg/= len(data)\n\n error = 0.0\n for i in data:\n error+= (avg - i[-1])**2\n error/= len(data)\n return error\ndef buildtree(data,o_criteria,a_criteria,prev_avg=-1,max_leaf=1,max_depth=-1,sample_size=-1):\n '''\n build a tree with the given subspace \n '''\n if len(data)==0:\n return [prev_avg]\n avg = 0.0\n for i in data:\n avg+=i[-1]\n avg/= len(data)\n if max_depth==0:\n return [avg]\n if len(data)<(max_leaf+1):\n return [avg]\n if sample_size==-1:\n sample_size=random.randint(int(len(o_criteria)/100*75), int(len(o_criteria)/100*100))\n criteria=random.sample(a_criteria ,sample_size)\n if criteria==[]:\n return [avg]\n best_c,best_v,best_e=[\"\",False],9999,float(\"inf\")\n for c in criteria:\n ci=index(o_criteria,c)\n sdata=sorted(data,key=lambda x:x[ci])\n if(c[1]):\n for spliti in range(0,nrsplit-1):\n\n ind=int(len(data)/nrsplit*(spliti+1))\n l,r=split(sdata,lambda x: x[ci] >= data[ind][ci])\n if len(l) == 0:\n terror=error(r)\n elif len(r) == 0:\n terror=error(l)\n else:\n terror=error(l)\n terror+=error(r)\n if(terror=best_v)\n if best_c[0]==\"\":\n return [avg]\n if len(l)==0:\n return buildtree(r,o_criteria,a_criteria,avg,max_leaf,max_depth-1,sample_size)\n if len(r)==0:\n return buildtree(l,o_criteria,a_criteria,avg,max_leaf,max_depth-1,sample_size)\n if(c[1]):\n return [lambda x: x[index(o_criteria,best_c)]>=best_v,\n buildtree(l,o_criteria,a_criteria,avg,max_leaf,max_depth-1, sample_size),\n buildtree(r,o_criteria,a_criteria,avg,max_leaf,max_depth-1,sample_size)]\n else:\n return [lambda x: x[index(o_criteria,best_c)]==best_v, \n buildtree(l,o_criteria,a_criteria,avg,max_leaf,max_depth-1, sample_size), \n buildtree(r,o_criteria,a_criteria,avg,max_leaf,max_depth-1,sample_size)]\n\ndef prediction(trees, item):\n '''\n returns the prediction\n '''\n avg=0.0\n for t in trees:\n avg+=pred_val(t,item)\n avg/=len(trees)\n\n return avg\ndef prediction_int(trees, item):\n '''\n returns the prediction\n '''\n avg=0.0\n for t in trees:\n avg+=pred_val(t,item)\n avg/=len(trees)\n\n return int(round(avg))\n\ndef pred_val(tree, item):\n if len(tree)==1:\n return tree[0]\n if tree[0](item):\n return pred_val(tree[2],item)\n else:\n return pred_val(tree[1],item)\n\ndef index(l,item):\n for i in range(0,len(l)):\n if l[i]==item:\n return i\n return -1\ndef fin_error(data,tree):\n error=0.0\n for i in data:\n error+=(i[-1]-pred_val(tree,i))**2\n error/=len(data)\n return error\ndef fin_error2(data,trees,prediction):\n error=0.0\n for i in data:\n p=prediction(trees,i)\n error+=(i[-1]-p)**2\n error/=len(data)\n return error\ndef biggest_dif(data,tree):\n val,pred=0.0,0.0\n for i in data:\n if abs((i[-1]-pred_val(tree,i)))>abs(val-pred):\n val=i[-1]\n pred=pred_val(tree,i)\n return val,pred\ndef biggest_dif2(data,trees,prediction):\n val,pred=0.0,0.0\n for i in data:\n pr=prediction(trees,i)\n if abs((i[-1]-pr))>abs(val-pred):\n val=i[-1]\n pred=pr\n return val,pred\ndef smallest_dif2(data,trees,prediction):\n val,pred=0.0,100000000000000000000.0\n for i in data:\n pr=prediction(trees,i)\n if abs((i[-1]-pr)) Stream hold - a stream manager has temporarily halted RPC work on the doc and will let us know when we can start on it again\n# Missing norm ref - the document is part of a cluster and one of its normative references is not in the queue yet\n# IANA action - RPC is waiting for IANA to update or create the registry for this doc\n# Informational labels to help with 
assignments:\n# IANA Considerations - the document has an IANA Considerations section\n# ABNF - the document contains ABNF sourcecode\n# Needs Formatting - the document requires an XML expert to format complex tables, nested lists, etc.\n LabelFactory(slug=\"Stream hold\", is_exception=True, color=\"yellow\")\n LabelFactory(slug=\"Missing norm ref\", is_exception=True, color=\"pink\")\n LabelFactory(slug=\"IANA action\", is_exception=True, color=\"rose\")\n LabelFactory(slug=\"IANA Consideration\", color=\"neutral\")\n LabelFactory(slug=\"ABNF\", color=\"emerald\")\n LabelFactory(slug=\"Needs Formatting\", color=\"indigo\")\n\n # Draft sent to RPC and in progress as an RfcToBe\n rfctobe = self._demo_rfctobe_factory(\n rpcapi=rpcapi,\n name=\"draft-ietf-tasty-cheese\",\n rev=\"00\",\n states=[(\"draft-iesg\", \"rfcqueue\")],\n )\n rfctobe.labels.add(LabelFactory(slug=\"delicious\"))\n rfctobe.labels.add(Label.objects.get(slug=\"Missing norm ref\"))\n AssignmentFactory(\n rfc_to_be=RfcToBe.objects.get(draft__name=\"draft-ietf-tasty-cheese\"),\n role__slug=\"first_editor\",\n person=self.people[\"atravis\"],\n state=\"assigned\",\n )\n AssignmentFactory(\n rfc_to_be=RfcToBe.objects.get(draft__name=\"draft-ietf-tasty-cheese\"),\n role__slug=\"formatting\",\n person=self.people[\"kstrawberry\"],\n state=\"in progress\",\n )\n\n rfctobe = self._demo_rfctobe_factory(\n rpcapi=rpcapi,\n name=\"draft-ietf-where-is-my-hat\",\n rev=\"04\",\n states=[(\"draft-iesg\", \"rfcqueue\")],\n )\n rfctobe.labels.add(LabelFactory(slug=\"is_a_trap\", is_exception=True, color=\"red\"))\n AssignmentFactory(\n rfc_to_be=RfcToBe.objects.get(draft__name=\"draft-ietf-where-is-my-hat\"),\n role__slug=\"second_editor\",\n person=self.people[\"sbexar\"],\n state=\"in progress\",\n )\n\n self._demo_rfctobe_factory(\n rpcapi=rpcapi,\n name=\"draft-irtf-improving-lizard-qol\",\n rev=\"07\",\n stream=\"irtf\",\n states=[(\"draft-iesg\", \"idexists\")],\n )\n AssignmentFactory(\n rfc_to_be=RfcToBe.objects.get(\n draft__name=\"draft-irtf-improving-lizard-qol\"\n ),\n role__slug=\"final_review_editor\",\n person=self.people[\"sbexar\"],\n state=\"assigned\",\n )\n RfcToBeActionHolderFactory(\n target_rfctobe=RfcToBe.objects.get(\n draft__name=\"draft-irtf-improving-lizard-qol\"\n ),\n datatracker_person__datatracker_id=rpcapi.create_demo_person(\n rpcapi_client.CreateDemoPersonRequest(name=\"Artimus Ad\"),\n ).person_pk,\n deadline=datetime.datetime.now(datetime.timezone.utc)+datetime.timedelta(days=14)\n )\n\n #\n # # Draft published as an RFC\n # rfc_number = next_rfc_number()[0]\n # RfcToBeFactory(\n # disposition__slug=\"published\",\n # rfc_number=rfc_number,\n # draft=WgRfcFactory(alias2__name=f\"rfc{rfc_number}\")\n # )\n\n @with_rpcapi\n def _demo_rfctobe_factory(\n self,\n *,\n rpcapi: rpcapi_client.DefaultApi,\n name,\n rev,\n states=None,\n stream=\"ietf\",\n **kwargs,\n ):\n \"\"\"Create a document on the back end and generate an RfcToBe linked to it\n\n **kwargs are passed through to the RfcToBeFactory\n \"\"\"\n resp = rpcapi.create_demo_draft(\n rpcapi_client.CreateDemoDraftRequest(\n name=name, rev=rev, states=states, stream=stream\n )\n )\n dtdoc = rpcapi.get_draft_by_id(resp.doc_id)\n try:\n rfctobe = RfcToBeFactory(\n **kwargs,\n draft__datatracker_id=dtdoc.id,\n draft__name=dtdoc.name,\n draft__rev=dtdoc.rev,\n draft__title=dtdoc.title,\n draft__stream=dtdoc.stream,\n draft__pages=dtdoc.pages,\n )\n return rfctobe\n except IntegrityError:\n print(\n f\">>> Warning: Failed to create RfcToBe for {dtdoc.name}, 
already exists?\"\n )\n","repo_name":"ietf-tools/rpc","sub_path":"rpc/management/commands/create_rpc_demo.py","file_name":"create_rpc_demo.py","file_ext":"py","file_size_in_byte":13922,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} +{"seq_id":"74593623795","text":"import logging\nimport typing\nimport copy\n\nfrom ..domain import stub_domain\n\nlogger = logging.getLogger(__name__)\n\n\nclass Repository(stub_domain.Repository):\n def __init__(self) -> None:\n self._entity_map: typing.Dict[str, stub_domain.Entity] = {}\n\n async def get(\n self,\n id: stub_domain.Id,\n ) -> typing.Optional[stub_domain.Entity]:\n stubs = await self.list(id=id)\n if stubs:\n return stubs[0]\n else:\n return None\n\n async def list(\n self,\n id: typing.Union[stub_domain.Id, typing.List[stub_domain.Id], None] = None,\n ) -> typing.List[stub_domain.Entity]:\n if id is None:\n ids = None\n else:\n ids = [i.value for i in (id if isinstance(id, list) else [id])]\n\n entities = []\n for entity_id, entity in self._entity_map.items():\n if ids is not None:\n if entity_id not in ids:\n continue\n entities.append(copy.deepcopy(entity))\n\n return entities\n\n async def save(\n self,\n entity: typing.Union[stub_domain.Entity, typing.List[stub_domain.Entity]],\n ) -> None:\n if isinstance(entity, list):\n _entities = entity\n else:\n _entities = [entity]\n\n for _entity in _entities:\n self._entity_map[_entity.id.value] = copy.deepcopy(_entity)\n\n async def add(\n self,\n entity: typing.Union[stub_domain.Entity, typing.List[stub_domain.Entity]],\n ) -> None:\n if isinstance(entity, list):\n _entities = entity\n else:\n _entities = [entity]\n\n for _entity in _entities:\n if _entity.id.value in self._entity_map:\n raise ValueError(f\"'{_entity.id.value}' already exists.\")\n\n await self.save(entity=_entities)\n\n async def update(\n self,\n entity: typing.Union[stub_domain.Entity, typing.List[stub_domain.Entity]],\n ) -> None:\n if isinstance(entity, list):\n _entities = copy.deepcopy(entity)\n else:\n _entities = [copy.deepcopy(entity)]\n\n for _entity in _entities:\n if _entity.id.value not in self._entity_map:\n raise ValueError(f\"'{_entity.id.value}' does not exist.\")\n\n await self.save(entity=_entities)\n\n async def remove(\n self,\n entity: typing.Union[stub_domain.Entity, typing.List[stub_domain.Entity]],\n ) -> None:\n if isinstance(entity, list):\n _entities = copy.deepcopy(entity)\n else:\n _entities = [copy.deepcopy(entity)]\n\n for _entity in _entities:\n if _entity.id.value not in self._entity_map:\n raise ValueError(f\"'{_entity.id.value}' does not exist.\")\n\n for _entity in _entities:\n del self._entity_map[_entity.id.value]\n\n async def remove_all(self) -> None:\n entities = await self.list()\n await self.remove(entity=entities)\n","repo_name":"qmonus/net-faker","sub_path":"src/qmonus_net_faker/infrastructure/stub_infrastructure.py","file_name":"stub_infrastructure.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"39571288883","text":"import frappe\nfrom frappe import _\n\ndef execute(filters=None):\n\tfilters = frappe._dict(filters or {})\n\tcolumns = get_columns(filters)\n\tdata_list = get_data(filters)\n\tdata = []\n\tfor i in data_list:\n\t\trow = ({\n\t\t\t \"name\":i.name,\n\t \"project\":i.project,\n\t \"project_structure\":i.project_structure,\n\t \"item_of_work\":i.item_of_work,\n\t \"estimate_quantity\":i.estimate_quantity,\n\t 
\"excess_quantity\":i.excess_quantity,\n\t \"est_total_qty\":i.est_total_qty,\n\t \"percent\": (i.excess_quantity/i.estimate_quantity) * 100\n\t\t\t})\n\t\tdata.append(row)\n\treturn columns, data\n\n\n\ndef get_columns(filters):\n\treturn[\n\t {\n\t 'label': _('Project'),\n\t 'fieldtype': 'Data',\n\t 'fieldname': 'project',\n\t 'width': 300,\n\t },\n\t {\n\t 'label': _('Project Structure'),\n\t 'fieldtype': 'Data',\n\t 'fieldname': 'project_structure',\n\t 'width': 300,\n\t },\n\t {\n\t 'label': _('Item Of Work'),\n\t 'fieldtype': 'Data',\n\t 'fieldname': 'item_of_work',\n\t 'width': 300,\n\t },\n\t {\n\t 'label': _('Percentage'),\n\t 'fieldtype': 'Percent',\n\t 'fieldname': 'percent',\n\t 'width': 300,\n\t }\n\n\t ]\n\ndef get_data(filters):\n\tconditions = get_conditions(filters)\n\treturn frappe.db.sql(\"\"\" with temp as \n\n\n\n\t (SELECT boq.name as name,\n\t boq.project as project,\n\t boq.project_structure as project_structure,\n\t boq.item_of_work as item_of_work,\n\t boq.estimate_quantity as estimate_quantity,\n\t boq.excess_quantity as excess_quantity,\n\t boq.est_total_qty as est_total_qty\n\n\t FROM `tabBOQ` boq \n\n\t WHERE boq.docstatus = 1) \n\n\n\t SELECT \n\t name,\n\t project,\n\t project_structure,\n\t item_of_work,\n\t estimate_quantity,\n\t excess_quantity,\n\t est_total_qty \n\t FROM \n\t temp\n\t %s \"\"\" % conditions,filters, as_dict=1)\n\n\ndef get_conditions(filters):\n\tconditions = \"\"\n\tif filters.get(\"project\"):\n\t\tconditions +=\" and boq.project = %(project)s\"\n\tif filters.get(\"project_structure\"):\n\t\tconditions +=\" and boq.project_structure = %(project_structure)s\"\n\tif filters.get(\"item_of_work\"):\n\t\tconditions +=\" and boq.item_of_work = %(item_of_work)s\"\t\n\treturn conditions\n","repo_name":"sowmyarajshree/jenkins-app","sub_path":"M4/construction/construction/report/boq_quantity_analysis/boq_quantity_analysis.py","file_name":"boq_quantity_analysis.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"7634724031","text":"import flopt\nfrom flopt.variable import VarElement\nfrom flopt.expression import Expression, Reduction, Const\nfrom flopt.constants import VariableType\n\n\ndef binarize(prob):\n \"\"\"binarize of problem\n\n Parameters\n ----------\n prob : Problem\n\n Examples\n --------\n .. 
code-block:: python\n\n import flopt\n\n x = flopt.Variable.array('x', 2, cat='Binary')\n y = flopt.Variable('y', lowBound=1, upBound=3, cat='Integer')\n\n prob = flopt.Problem()\n prob += y * x[0] + x[1]\n\n print('[ original ]')\n prob.show()\n >>> [ original ]\n >>> Name: None\n >>> Type : Problem\n >>> sense : minimize\n >>> objective : y_0*x_0+x_1\n >>> #constraints : 0\n >>> #variables : 3 (Binary 2, Integer 1)\n\n from flopt.convert import linearize, binarize\n\n binarize(prob)\n\n print('[ binarized ]')\n prob.show()\n >>> [ binarized ]\n >>> Name: None\n >>> Type : Problem\n >>> sense : minimize\n >>> objective : x_0*(1*bin_y_0_0+2*bin_y_0_1+3*bin_y_0_2)+x_1\n >>> #constraints : 2\n >>> #variables : 6 (Binary 5, Integer 1)\n\n >>> C 0, name for_bin_y_0_sum, bin_y_0_0+bin_y_0_2+bin_y_0_1-1 == 0\n >>> C 1, name for_bin_y_0_eq, y_0-(1*bin_y_0_0+2*bin_y_0_1+3*bin_y_0_2) == 0\n\n linearize(prob)\n\n print('[ linearized ]')\n prob.show()\n >>> [ linearized ]\n >>> Name: None\n >>> Type : Problem\n >>> sense : minimize\n >>> objective : mul_0+2*mul_1+3*mul_2+x_1\n >>> #constraints : 11\n >>> #variables : 9 (Binary 8, Integer 1)\n\n >>> C 0, name for_bin_y_0_sum, bin_y_0_0+bin_y_0_1+bin_y_0_2-1 == 0\n >>> C 1, name for_bin_y_0_eq, -bin_y_0_0-(2*bin_y_0_1)-(3*bin_y_0_2)+y_0 == 0\n >>> C 2, name for_mul_0_1, mul_0-bin_y_0_0 <= 0\n >>> C 3, name for_mul_0_2, mul_0-x_0 <= 0\n >>> C 4, name for_mul_0_3, mul_0-(bin_y_0_0+x_0-1) >= 0\n >>> C 5, name for_mul_1_1, mul_1-bin_y_0_1 <= 0\n >>> C 6, name for_mul_1_2, mul_1-x_0 <= 0\n >>> C 7, name for_mul_1_3, mul_1-(bin_y_0_1+x_0-1) >= 0\n >>> C 8, name for_mul_2_1, mul_2-bin_y_0_2 <= 0\n >>> C 9, name for_mul_2_2, mul_2-x_0 <= 0\n >>> C 10, name for_mul_2_3, mul_2-(bin_y_0_2+x_0-1) >= 0\n \"\"\"\n binarizes = {}\n prob.obj = binarize_expression(prob.obj, binarizes)\n for const in prob.getConstraints():\n const.expression = binarize_expression(const.expression, binarizes)\n\n for source, binaries in binarizes.items():\n prob += flopt.Sum(binaries) == 1, f\"for_bin_{source.name}_sum\"\n prob += source == source.toBinary(), f\"for_bin_{source.name}_eq\"\n\n return prob\n\n\ndef binarize_expression(e, binarizes):\n \"\"\"binarize a expression\n\n Parameters\n ----------\n e : Expression or Reduction or Const\n binarizes : dict\n binarizes[var] = binaries, where var = sum(i*var_bin)\n \"\"\"\n assert isinstance(e, (Expression, Reduction, Const))\n if isinstance(e, Const):\n return e\n e = e.expand() # convert reduction obj to Expression\n e.resetlinkChildren()\n\n finish = False\n while not finish:\n finish = not binarize_traverse(e, binarizes)\n return e\n\n\ndef binarize_traverse(e, binarizes):\n \"\"\"subroutine of binarize_expression\n\n Parameters\n ----------\n e : Expression\n binarizes : dict\n binarizes[var] = binaries, where var = sum(i*var_bin)\n\n Returns\n -------\n bool\n return true if a expession is linearized else false\n \"\"\"\n assert isinstance(e, Expression)\n for node in e.traverse():\n if isinstance(node, Expression):\n update = False\n if node.elmA.type() == VariableType.Integer:\n if node.elmA not in binarizes:\n binarizes[node.elmA] = list(node.elmA.getBinaries())\n node.elmA = node.elmA.toBinary()\n node.elmA.parents.append(node)\n update = True\n elif node.elmA.type() == VariableType.Spin:\n node.elmA = node.elmA.toBinary()\n node.elmA.parents.append(node)\n update = True\n if node.elmB.type() == VariableType.Integer:\n if node.elmB not in binarizes:\n binarizes[node.elmB] = list(node.elmB.getBinaries())\n node.elmB = 
node.elmB.toBinary()\n node.elmB.parents.append(node)\n update = True\n elif node.elmB.type() == VariableType.Spin:\n node.elmB = node.elmB.toBinary()\n node.elmB.parents.append(node)\n update = True\n if update:\n node.resetName()\n node.polynomial = None\n for parent in node.traverseAncestors():\n parent.resetName()\n parent.polynomial = None\n return True\n return False\n","repo_name":"nariaki3551/flopt","sub_path":"flopt/convert/binarize.py","file_name":"binarize.py","file_ext":"py","file_size_in_byte":5072,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"4"} +{"seq_id":"11381700262","text":"inf = []\r\nword = input('Введите латинскую словоформу: ')\r\nword = word.strip()\r\nwhile word != '':\r\n word = word.lower()\r\n if word.find(' ') == -1:\r\n if (word[-3:] == 'ire') or (word[-3:] == 'are') or (word[-3:] == 'ere') or (word[-1] == 'i') or (word[-4:] == 'isse'): \r\n inf.append(word) \r\n else:\r\n part_1 = word[:word.find(' ')]\r\n part_2 = word[word.find(' '):]\r\n part_2 = part_2.strip()\r\n if part_2.find(' ') == -1:\r\n if (part_2[-3:] == 'iri') and (part_1[-2:] == 'um'):\r\n inf.append(part_1 + ' ' + part_2)\r\n elif (part_2[-4:] == 'esse') and ((part_1[-1] == 'a') or (part_1[-2:] == 'um') or (part_1[-2:] == 'us')):\r\n inf.append(part_1 + ' ' + part_2) \r\n word = input('Введите латинскую словоформу: ')\r\n word = word.strip()\r\nfor i in range(len(inf)):\r\n print(inf[i]) \r\n","repo_name":"Filaona/Homeworks","sub_path":"python_variant4/hw_3/python_hw_3.py","file_name":"python_hw_3.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"1445136168","text":"import sys\ns = input()\nfor i in range(len(s)):\n cut = s[len(s)-i:]\n remain = s[:len(s)-i]\n r = \"\"\n for c in remain:\n if c!= \"a\":\n r+=c\n if r == cut:\n print(remain)\n sys.exit()\nprint(\":(\")","repo_name":"kavandoctor1/codeforces","sub_path":"problemset/1146/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"40579783335","text":"from mcpi.minecraft import Minecraft\r\nmc = Minecraft.create()\r\n\r\n# Create a building.\r\n##x = 184\r\n##y = 63\r\n##z = 531\r\n##\r\n##mc.setBlocks(x, y, z, x + 6, y + 6, z + 10, 4)\r\n##mc.setBlocks(x + 1, y + 1, z + 1, x + 5, y + 5, z + 9, 0)\r\n\r\n\r\n# Open secret door.\r\ngift = 57\r\nblock = mc.getBlock(188, 64, 530)\r\n\r\nif block != gift:\r\n pos = mc.player.getTilePos()\r\n mc.setBlock(pos.x, pos.y, pos.z, 10)\r\nelif block == gift:\r\n mc.setBlock(187, 63, 531, 0)\r\n mc.setBlock(187, 64, 531, 0)\r\n mc.setBlock(187, 65, 531, 0)\r\nelse:\r\n mc.postToChat(\"Place an offering on the pedestal.\")\r\n \r\n","repo_name":"mimi1987/Minecraft-Python","sub_path":"secret_door.py","file_name":"secret_door.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"4949748866","text":"#!/usr/bin/env python\n\nimport os\n\ndef java_type_name(type_name):\n t = type_name.split('_')\n return ''.join([s.title() for s in t])\n\ndef java_field_name(field_name):\n t = field_name.split('_')\n if len(t) == 1:\n return field_name\n else:\n return t[0] + ''.join([s.title() for s in t[1:]])\n\ndef is_primative(t):\n return t in ['boolean', 'double', 'int', 'long', 'String']\n\nclass Field:\n def __init__(self, f_type, f_name, 
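# flopt's binarize step above replaces a bounded integer y in [lb, ub] with
# binaries b_lb..b_ub such that y == sum(v * b_v) under the one-hot constraint
# sum(b_v) == 1. A tiny brute-force check of that encoding for y in [1, 3],
# independent of flopt itself:
from itertools import product

lb, ub = 1, 3
values = range(lb, ub + 1)
encodable = set()
for bits in product((0, 1), repeat=len(values)):
    if sum(bits) == 1:                       # the one-hot constraint
        encodable.add(sum(v * b for v, b in zip(values, bits)))
assert encodable == set(values)              # every y in [1, 3] is reachable
print(sorted(encodable))                     # [1, 2, 3]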
is_array):\n self.is_array = is_array\n self.data = {\n 'f_type': f_type, 'f_name': java_field_name(f_name),\n 'f_type_title': f_type.title(),\n 'k_name': f_name\n }\n\n def definition(self):\n if self.is_array:\n return 'public %(f_type)s[] %(f_name)s;' % self.data\n else:\n return 'public %(f_type)s %(f_name)s;' % self.data\n\nclass PrimativeField(Field):\n def read_from_json(self):\n if self.is_array:\n return '''\n if (data.has(\"%(k_name)s\")) {\n org.json.JSONArray arr = data.getJSONArray(\"%(k_name)s\");\n int len = arr.length();\n %(f_name)s = new %(f_type)s[len];\n for (int i = 0 ; i < len ; i++) {\n %(f_name)s[i] = arr.get%(f_type_title)s(i);\n }\n }''' % self.data\n else:\n return '%(f_name)s = data.opt%(f_type_title)s(\"%(k_name)s\");' % self.data\n\n def read_from_parcel(self):\n if self.is_array:\n return '%(f_name)s = in.create%(f_type_title)sArray();' % self.data\n else:\n return '%(f_name)s = in.read%(f_type_title)s();' % self.data\n\n def write_to_parcel(self):\n if self.is_array:\n return 'dest.write%(f_type_title)sArray(%(f_name)s);' % self.data\n else:\n return 'dest.write%(f_type_title)s(%(f_name)s);' % self.data\nclass BooleanField(PrimativeField):\n def read_from_parcel(self):\n if self.is_array:\n return '''\n int[] _%(f_name)s = in.createIntArray();\n if (_%(f_name)s != null) {\n %(f_name)s = new boolean[_%(f_name)s.length];\n for (int i = 0 ; i < _%(f_name)s.length ; i++) {\n %(f_name)s[i] = (_%(f_name)s[i] != 0);\n }\n }''' % self.data\n else:\n return '%(f_name)s = (in.readInt() != 0);' % self.data\n\n def write_to_parcel(self):\n if self.is_array:\n return '''\n if (%(f_name)s == null){\n dest.writeIntArray((int[])null);\n } else {\n int[] _%(f_name)s = new int[%(f_name)s.length];\n for (int i = 0 ; i < %(f_name)s.length ; i++) {\n _%(f_name)s[i] = (%(f_name)s[i] ? 1 : 0);\n }\n dest.writeIntArray(_%(f_name)s);\n }''' % self.data\n else:\n return 'dest.writeInt(%(f_name)s ? 1 : 0);' % self.data\n\nclass ObjectField(Field):\n\n def read_from_json(self):\n if self.is_array:\n return '%(f_name)s = data.has(\"%(k_name)s\") ? %(f_type)s.loadArray(data.getJSONArray(\"%(k_name)s\")) : null;' % self.data\n else:\n return '%(f_name)s = data.has(\"%(k_name)s\") ? 
new %(f_type)s(data.getJSONObject(\"%(k_name)s\")) : null;' % self.data\n\n def read_from_parcel(self):\n if self.is_array:\n return '%(f_name)s = (%(f_type)s[])in.readParcelableArray(null);' % self.data\n else:\n return '%(f_name)s = (%(f_type)s)in.readParcelable(null);' % self.data\n\n def write_to_parcel(self):\n if self.is_array:\n return 'dest.writeParcelableArray(%(f_name)s, flags);' % self.data\n else:\n return 'dest.writeParcelable(%(f_name)s, flags);' % self.data\n\nclass VO:\n def __init__(self, name):\n self.name = name\n self.fields = []\n\n def add_field(self, f):\n self.fields.append(f)\n\n def gen_file(self, directory, package, parent_class, meta):\n type_name = java_type_name(self.name)\n file_path = os.path.join(directory, type_name + '.java')\n\n _data = {'type_name': type_name}\n\n out = open(file_path, 'w')\n\n print >> out, '''\npackage %s;\n\nimport android.os.Parcel;\nimport android.os.Parcelable;\n\n'''% package\n\n # write class name\n if parent_class:\n print >> out, 'public class %s extends %s implements Parcelable {' % (type_name, parent_class)\n else:\n print >> out, 'public class %s implements Parcelable {' % type_name\n\n # write fields definitions\n for field in self.fields:\n print >> out, ' %s' % field.definition()\n \n # default constructor\n print >> out, ' public %s() {}' % type_name\n\n # construct from json\n print >> out, ' public %s(org.json.JSONObject data) throws org.json.JSONException {' % type_name\n for field in self.fields:\n print >> out, ' %s' % field.read_from_json()\n print >> out, ' }'\n\n # construct from Parcel\n print >> out, ' private %s(Parcel in) {' % type_name\n for field in self.fields:\n print >> out, ' %s' % field.read_from_parcel()\n print >> out, ' }'\n\n # write to Parcel\n print >> out, ' public void writeToParcel(Parcel dest, int flags) {'\n for field in self.fields:\n print >> out, ' %s' % field.write_to_parcel()\n print >> out, ' }'\n\n # 1. load array from json array\n # 2. 
Parcelable implementation\n print >> out, '''\n public static %(type_name)s[] loadArray(org.json.JSONArray array) throws org.json.JSONException {\n int len = array.length();\n %(type_name)s[] result = new %(type_name)s[len];\n if (len > 0) {\n for (int i = 0; i < len; i++) {\n org.json.JSONObject o = array.getJSONObject(i);\n result[i] = new %(type_name)s(o);\n }\n }\n return result;\n\n }\n\n public int describeContents() {\n return getClass().getName().hashCode();\n }\n\n public static final Parcelable.Creator<%(type_name)s> CREATOR = new Parcelable.Creator<%(type_name)s>() {\n public %(type_name)s createFromParcel(Parcel in) {\n return new %(type_name)s(in);\n }\n\n public %(type_name)s[] newArray(int size) {\n return new %(type_name)s[size];\n }\n };''' % _data\n\n\n print >> out, '} // end class'\n\n out.close()\n\nclass AndroidObjects:\n def __init__(self):\n self.meta = {}\n self.package = ''\n self.parent_class = ''\n self.objects = []\n\n def parse(self, lines):\n for line in lines:\n self._parse(line.strip())\n\n if self.package == '':\n raise Exception('you must specify package')\n\n if len(self.objects) == 0:\n raise Exception('no objects found')\n\n def _parse(self, line):\n if not line: return\n\n c = line[0]\n if c == '#':\n t = line[1:].split()\n if hasattr(self, t[0]):\n setattr(self, t[0], t[1])\n else:\n self.meta[t[0]] = t[1]\n elif c == '@':\n vo = VO(line[1:])\n self.objects.append(vo)\n else:\n vo = self.objects[-1]\n t = line.split()\n f_type = t[0]\n if f_type.endswith('[]'):\n is_array = True\n f_type = f_type[:-2]\n else:\n is_array = False\n\n if f_type in ['double', 'int', 'long', 'String']:\n clz = PrimativeField\n elif f_type == 'boolean':\n clz = BooleanField\n else:\n clz = ObjectField\n f_type = f_type.title()\n\n for f_name in t[1:]:\n vo.add_field(clz(f_type, f_name, is_array))\n\n def gen_files(self, directory):\n if not os.path.exists(directory):\n raise Exception('directory not found')\n\n file_directory = os.path.join(directory, self.package.replace('.', os.path.sep))\n if not os.path.exists(file_directory):\n os.makedirs(file_directory)\n\n for vo in self.objects:\n vo.gen_file(file_directory, self.package, self.parent_class, self.meta)\n\n\nif __name__ == '__main__':\n import sys\n objects_file = sys.argv[1]\n target_directory = sys.argv[2]\n\n ao = AndroidObjects()\n lines = open(objects_file)\n ao.parse(lines)\n lines.close()\n\n ao.gen_files(target_directory)\n","repo_name":"xingfei/droid-side","sub_path":"src/android_object.py","file_name":"android_object.py","file_ext":"py","file_size_in_byte":8513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"43575964517","text":"import tensorflow as tf\nfrom .unet_parts import *\n\n\nclass UNet(tf.keras.Model):\n def __init__(self,n_classes,\n height, width,\n known_n_points=None, ngf=64):\n super(UNet, self).__init__()\n\n\n # With this network depth, there is a minimum image size\n if height < 256 or width < 256:\n raise ValueError('Minimum input image size is 256x256, got {}x{}'.\\\n format(height, width))\n\n self.inc = inconv(ngf)\n self.down1 = down(ngf*2)\n self.down2 = down(ngf*2**2)\n self.down3 = down(ngf*2**3)\n self.down4 = down(ngf*2**3)\n self.down5 = down(ngf*2**3)\n self.down6 = down(ngf*2**3)\n self.down7 = down(ngf*2**3)\n self.down8 = down(ngf*2**3, normaliz=False)\n self.up1 = up(ngf*2**3)\n self.up2 = up(ngf*2**3)\n self.up3 = up(ngf*2**3)\n self.up4 = up(ngf*2**3)\n self.up5 = up(ngf*2**2)\n self.up6 = up(ngf*2)\n self.up7 
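# android_object.py above drives Java code generation from small string
# templates filled from a per-field data dict via %-formatting. The essence of
# that technique in a few lines (the field name "age" is made up for
# illustration):
field_template = "public %(f_type)s %(f_name)s;"
json_template = '%(f_name)s = data.opt%(f_type_title)s("%(k_name)s");'

def render(template, f_type, k_name):
    data = {"f_type": f_type, "f_type_title": f_type.title(),
            "f_name": k_name, "k_name": k_name}
    return template % data

print(render(field_template, "int", "age"))   # public int age;
print(render(json_template, "int", "age"))    # age = data.optInt("age");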
= up(ngf)\n        self.up8 = up(ngf, activ=False)\n        self.outc = outconv(n_classes)\n        self.out_nonlin = tf.keras.layers.Activation('sigmoid')\n        \n\n    def call(self, x):\n        batch_size = x.shape[0]\n        x1 = self.inc(x)\n        x2 = self.down1(x1)\n        x3 = self.down2(x2)\n        x4 = self.down3(x3)\n        x5 = self.down4(x4)\n        x6 = self.down5(x5)\n        x7 = self.down6(x6)\n        x8 = self.down7(x7)\n        x9 = self.down8(x8)\n        x = self.up1(x9, x8)\n        x = self.up2(x, x7)\n        x = self.up3(x, x6)\n        x = self.up4(x, x5)\n        x = self.up5(x, x4)\n        x = self.up6(x, x3)\n        x = self.up7(x, x2)\n        x = self.up8(x, x1)\n        x = self.outc(x)\n        x = self.out_nonlin(x)\n\n        # Reshape Bx1xHxW -> BxHxW\n        # because probability map is real-valued by definition\n        x = tf.squeeze(x)\n        return x\n\n\n","repo_name":"anhvth/object-locator","sub_path":"models/unet_model.py","file_name":"unet_model.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"4"}
+{"seq_id":"3845312872","text":"from gerar_par_de_chaves import gerar_keys\nfrom MsgHandler import msg_encrypt, msg_decrypt\n\n# Title\nprint('*'*15, \"Sistema de Mensagens\", '*'*15)\n\n# Generating the key pair\na, b = gerar_keys()\npub_key = {'e': a[0], 'n': a[1]}\npriv_key = {'d': b[0], 'n': b[1]}\n\n# Receiving the message\nmessage = input(\"Digite sua mensagem: \")\n\n# Encrypting the message\nencrypted_menssage = msg_encrypt(message,pub_key)\nprint(encrypted_menssage)\n\n# Decrypting the message\ndecrypted_menssage = msg_decrypt(encrypted_menssage,priv_key)\nprint(decrypted_menssage)\n","repo_name":"YamSol/encrypted-msg-system","sub_path":"key_generation/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"5404756919","text":"import time\nimport re\nfrom collections import Counter\nimport colorsys\nfrom urllib.parse import urlparse\nfrom bs4 import BeautifulSoup\nimport requests\n\ndef extract_colors(url, css_content):\n    response = requests.get(url)\n    html = response.text\n\n    # Parse the HTML with BeautifulSoup\n    soup = BeautifulSoup(html, 'html.parser')\n    \n    domain = urlparse(url).netloc\n    color_patterns = {\n        r\"#([0-9a-fA-F]{6})\": \"hex\",\n        r\"rgb\\((\\d{1,3}),\\s*(\\d{1,3}),\\s*(\\d{1,3})\\)\": \"rgb\",\n        r\"rgba\\((\\d{1,3}),\\s*(\\d{1,3}),\\s*(\\d{1,3}),\\s*((\\d*(\\.\\d+)?)|1\\.0+)\\)\": \"rgba\",\n    }\n    background_pattern = r\"background(-color)?\\s*:\\s*(#([0-9a-fA-F]{6})|rgb\\((\\d{1,3}),\\s*(\\d{1,3}),\\s*(\\d{1,3})\\)|rgba\\((\\d{1,3}),\\s*(\\d{1,3}),\\s*(\\d{1,3}),\\s*((\\d*(\\.\\d+)?)|1\\.0+)\\));\"\n    font_pattern = r\"font-family\\s*:\\s*(.*?);\"\n    color_counts = Counter()\n    background_counts = Counter()\n    font_counts = Counter()\n\n    # Check for background color\n    print(f\"Searching for Background Colors in {domain}...\")\n    time.sleep(1)\n    if \"background\" in css_content or \"background-color\" in css_content:\n        background_matches = re.findall(background_pattern, css_content)\n        if background_matches:\n            background_counts.update(background_matches)\n            for color, count in background_counts.most_common(2):\n                background_color = color[1]\n                print(f\"Background color: {background_color}, Count: {count}\")\n\n    # Check for font family\n    print(f\"Searching for Font Family in {domain}...\")\n    time.sleep(1)\n    if \"font-family\" in css_content:\n        font_matches = re.findall(font_pattern, css_content)\n        if font_matches:\n            font_counts.update(font_matches)\n            for font, count in font_counts.most_common(2):\n                print(f\"Font 
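# The message system above delegates to msg_encrypt/msg_decrypt; the core of
# textbook RSA is modular exponentiation, which Python's three-argument pow()
# computes directly. Toy demo primes for illustration only -- nowhere near
# secure, and real code should use a vetted crypto library instead.
p, q, e = 61, 53, 17                     # small demo primes
n = p * q
phi = (p - 1) * (q - 1)
d = pow(e, -1, phi)                      # modular inverse (Python 3.8+)

message = 42                             # must satisfy 0 <= m < n
cipher = pow(message, e, n)              # encrypt with the public key (e, n)
plain = pow(cipher, d, n)                # decrypt with the private key (d, n)
assert plain == message
print(cipher, plain)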
Family: {font}, Count: {count}\")\n\n for pattern, color_type in color_patterns.items():\n matches = re.findall(pattern, css_content)\n if matches:\n print(f\"{color_type} Color codes found in the CSS file:\")\n time.sleep(2)\n\n # Convert RGBA colors to RGB\n if color_type == \"rgba\":\n matches = [(*map(int, match[:3]), float(match[3])) for match in matches]\n\n color_counts.update(matches)\n\n for color, count in color_counts.most_common(2):\n if color_type == \"hex\":\n print(f\"Color code: #{color}, Count: {count}\")\n elif len(color) == 4:\n print(f\"Color code: {color_type}({', '.join(map(str, color))}), Count: {count}\")\n\n # Save colors in a txt file\n with open(f\"colors/{domain}_{color_type}_colors.txt\", \"a\") as f:\n for color, count in color_counts.most_common(2):\n if color_type == \"hex\":\n f.write(f\"Color code: #{color}, Count: {count}\\n\")\n print(f\"Color code: #{color}, Count: {count}\")\n elif len(color) == 4:\n f.write(f\"Color code: {color_type}({', '.join(map(str, color))}), Count: {count}\\n\")\n print(f\"Color code: {color_type}({', '.join(map(str, color))}), Count: {count}\")\n\n else:\n print(f\"{color_type} not found\")\n # Search for font-family in css\n font_family_pattern = r\"font-family\\s*:\\s*([\\w\\s\\',-]+)\\s*;\"\n font_family_matches = re.findall(font_family_pattern, css_content)\n if font_family_matches:\n print(\"Font Families found in the CSS file:\")\n time.sleep(2)\n font_family_counts = Counter(font_family_matches)\n for font_family, count in font_family_counts.most_common(1):\n # Save font family in a txt file\n with open(f\"fonts/{domain}_font_family.txt\", \"a\") as f:\n f.write(f\"Font Family: {font_family}, Count: {count}\\n\")\n else:\n print(\"Font Family not found\")\n\n\n\n\n","repo_name":"gingapower/artiPHISHial","sub_path":"frontend/backend/colors.py","file_name":"colors.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"37581958636","text":"def isint(n):\n try:\n int(n)\n return True\n except ValueError:\n return False\n\ndef Part1(data):\n commands = \" \".join(data).split(\" \")\n X = 1\n strength = []\n for i,c in enumerate(commands,1):\n if i%40 == 20: # 20,60,100,...\n strength.append(i*X)\n # Since addx is the only nontrivial instruction\n if isint(c):\n X += int(c)\n return sum(strength[:6])\n\ndef Part2(data):\n commands = \" \".join(data).split(\" \")\n X = 1\n TV = ''\n for i,c in enumerate(commands):\n if i%40 == 0:\n TV += '\\n\\t'\n\n if X-1 <= (i%40) <= X+1:\n TV += '██'\n else:\n TV += ' '\n # Since addx is the only nontrivial instruction\n if isint(c):\n X += int(c)\n return TV[2:] # main.py already prints a leading tab","repo_name":"3saster/advent-of-code-2022","sub_path":"Day 10/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"32842834928","text":"from IPython.core.display import display, HTML\nimport etude\nfrom etude.run_exercise_magic import add_magic\nimport nbformat.v4 as nbf\nimport os\nimport importlib\nimport textwrap\n\n\ndef convert_exercise_to_cells(exercise):\n \"\"\"\n Generates a header, exercise text and code cell from an Exercise class\n \"\"\"\n cells = []\n markdown_text = \"## \" + exercise.name + \"\\n\"\n markdown_text += textwrap.dedent(exercise.__doc__) or \"\"\n cells.append(nbf.new_markdown_cell(markdown_text))\n\n code_cell_text = 
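# colors.py above pairs re.findall with collections.Counter to rank the most
# frequent colors. A stripped-down version of its hex branch, run against a
# small inline stylesheet instead of a fetched one:
import re
from collections import Counter

css = "a { color: #ff0000; } p { color: #00ff00; } h1 { color: #ff0000; }"
hex_counts = Counter(re.findall(r"#([0-9a-fA-F]{6})", css))
for color, count in hex_counts.most_common(2):
    print(f"Color code: #{color}, Count: {count}")
# Color code: #ff0000, Count: 2
# Color code: #00ff00, Count: 1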
\"%%run_exercise \" + exercise.name + \"\\n\"\n code_cell_text += textwrap.dedent(exercise.cell_code)\n cells.append(nbf.new_code_cell(code_cell_text))\n\n return cells\n\n\ndef initialize(filepath):\n \"\"\"\n This function should be called at the beginning of an exercise notebook to initialize everything\n \"\"\"\n filepath = os.path.abspath(filepath)\n\n exercise_module = load_exercise_module(filepath)\n exercises = exercise_module.exercises\n\n add_magic(exercises)\n\n css_path = os.path.join(os.path.dirname(etude.__file__), \"assets\", \"notebook_style.css\")\n display(load_css(css_path))\n\n\ndef load_exercise_module(filepath):\n try:\n exercise_module = importlib.machinery.SourceFileLoader(\n 'exercise_module', filepath\n ).load_module()\n except AttributeError: # Python2\n import imp\n exercise_module = imp.load_source(\"exercise_module\", filepath)\n\n return exercise_module\n\n\ndef load_css(filepath):\n \"\"\"\n Return an HTML object containing the style rules from the given file\n \"\"\"\n with open(filepath) as css_file:\n css_code = css_file.read()\n return HTML(\"\")\n","repo_name":"KristianJensen/etude","sub_path":"etude/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"34784686743","text":"from cs1media import *\r\n\r\ndef scale(img):\r\n w,h = img.size()\r\n new_img = create_picture(w/4,h/4)\r\n norm = 4 ** 2\r\n for y in range(h/4):\r\n for x in range(w/4):\r\n r, g, b = 0, 0, 0\r\n x1 = x * 4\r\n y1 = y * 4\r\n for a1 in range(4):\r\n for a2 in range(4):\r\n r0,g0,b0 = img.get(x1+a1,y1+a2)\r\n r, g, b = r+r0, g+g0, b+b0\r\n r, g, b = r/norm, g/norm, b/norm\r\n new_img.set(x, y, (r, g, b))\r\n return new_img\r\n\r\n\r\ndef crossfade(img1, img2):\r\n yuna = scale(img1)\r\n wonbin = scale(img2)\r\n w,h = img1.size()\r\n new_img = create_picture(w,h)\r\n for j in range(4):\r\n for i in range(4):\r\n for y in range(h/4):\r\n for x in range(w/4):\r\n r1,g1,b1 = yuna.get(x,y)\r\n r2,g2,b2 = wonbin.get(x,y)\r\n r1, g1, b1 = r1*((15-(4*i+j))/15.0), g1*((15-(4*i+j))/15.0), b1*((15-(4*i+j))/15.0)\r\n r2,g2,b2 = r2*((4*i+j)/15.0),g2*((4*i+j)/15.0),b2*((4*i+j)/15.0)\r\n r, g, b = r1 + r2, g1 + g2, b1 + b2\r\n new_img.set(x+j*w/4,y+i*h/4,(r,g,b))\r\n new_img.show()\r\ncrossfade(load_picture(\"yuna.jpg\"), load_picture(\"wonbin.jpg\"))\r\n\r\n\r\n\r\n\r\n","repo_name":"jang1563/2010_Python_class","sub_path":"Lab/Lab7/lab7.py","file_name":"lab7.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"27822334755","text":"'''\n감상 : 저어는 아직도 노드를 정점이라고 부르는게 익숙하지 않아요\n접근 : \n루트가 U인 서브 트리 노드 수 구하는 문제, 이지만 일단 숫자를 보니 DP 쓰는건 확정\n전부 탐방할때 올바른 트리임이 보장 되니까 DFS 써도 되고, BFS 써도 되고...\n'''\n\nimport sys\ninput = sys.stdin.readline\nsys.setrecursionlimit(100000)\n\ndef dfs(node):\n nodes[node] = 1\n visited[node] = 1\n # 돌아가면서 깊게 방문\n for i in trees[node]:\n if not visited[i]:\n dfs(i)\n nodes[node] = nodes[node] + nodes[i]\n return\n\n# 정점 수, 루트 번호, 쿼리 수\nN, R, Q = map(int, input().split())\ntrees = [[] for _ in range(N+1)]\nnodes = [0] * (N+1) # dp할 answer\nvisited = [0] * (N+1)\n\n# 간선 수 : N - 1\nfor _ in range(N-1):\n nodeA, nodeB = map(int, input().split())\n trees[nodeA].append(nodeB) # 무방향\n trees[nodeB].append(nodeA) # 무방향\n\n# 판 다 깔았고\ndfs(R)\n# print(nodes)\n\n# 출력\nfor _ in range(Q):\n print(nodes[int(input())])\n\n\n'''\n뭐가 올바른 트리 보장이냐 하 씨;; 
\nsys.setrecursionlimit(100000) 이거 없어서 안돌아간거\n뭘 잘 못쓴건 줄 알고 트리랑 오타만 30분간 찾음. BFS나 쓸 걸...\n근데 저 문장 왜 최대치만 설정해놓는건데 메모리 먹음??\n'''","repo_name":"k-min9/TIL","sub_path":"00. Daily Algorithm/BOJ/BOJ15681_Gd5_트리와_쿼리.py","file_name":"BOJ15681_Gd5_트리와_쿼리.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"69935466999","text":"import streamlit as st\nfrom PIL import Image\ndef app():\n st.title(\"About\")\n image1=Image.open('unnamed-modified.png')\n col1,col2,col3=st.columns(3)\n col1.image(image1)\n col1.subheader(\"Kartik Chhipa\")\n col1.write(\"B20CS084\")\n col1.write(\"Computer Science and Engineering\")\n image2=Image.open('WhatsApp Image 2022-05-02 at 7.30.05 PM-modified.png')\n col2.image(image2)\n col2.subheader(\"Rushil Shah\")\n col2.write(\"B20AI036\")\n col2.write(\"Artificial Intelligence and Data Science\")\n image3=Image.open('WhatsApp Image 2022-05-02 at 9.07.01 PM-modified.png')\n col3.image(image3)\n col3.subheader(\"Ruthvik K\")\n col3.write(\"B20CS037\")\n col3.write(\"Artificial Intelligence and Data Science\")\n\n ","repo_name":"kartikchhipa/HeartFailurePrediction","sub_path":"About.py","file_name":"About.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"36303821207","text":"from django.contrib.admin.actions import delete_selected\nfrom django.forms import Select, SelectMultiple\nfrom django.utils.encoding import smart_text\nfrom django.utils.html import conditional_escape, escape\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext_lazy\nfrom mptt.admin import MPTTModelAdmin\nfrom iotrec_api.models import User, Thing, Category, Recommendation, Feedback, Preference, IotRecSettings, Rating, \\\n Stay, SimilarityReference, Context\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth.forms import UserChangeForm, UserCreationForm\nfrom django.contrib import admin\nfrom django import forms\nfrom iotrec_api.utils import similarity_reference\nfrom iotrec_api.utils.category import calc_items_in_cat_list\n\n# display seconds in admin\nfrom django.conf.locale.en import formats as en_formats\nen_formats.DATETIME_FORMAT = \"d-m-Y H:i:s\"\n\n\nclass InlineFormset(forms.models.BaseInlineFormSet):\n def clean(self):\n for form in self.forms:\n for field in form.changed_data:\n print(form.cleaned_data[field])\n\n\n# source: https://stackoverflow.com/a/17496836\nclass IotRecUserChangeForm(UserChangeForm):\n class Meta(UserChangeForm.Meta):\n model = User\n\n\n# source: https://stackoverflow.com/a/17496836\nclass IotRecUserCreationForm(UserCreationForm):\n class Meta(UserCreationForm.Meta):\n model = User\n\n def clean_username(self):\n username = self.cleaned_data['username']\n try:\n User.objects.get(username=username)\n except User.DoesNotExist:\n return username\n raise forms.ValidationError(self.error_messages['duplicate_username'])\n\n\nclass PreferencesInLine(admin.TabularInline):\n model = Preference\n extra = 0\n formset = InlineFormset\n\n\nclass IotRecSettingsAdmin(admin.ModelAdmin):\n fields = ['evaluation_mode', 'training_active', 'recommendation_threshold', 'nr_of_reference_things_per_thing',\n 'category_weight', 'locality_weight', 'prediction_weight', 'context_weight']\n list_display = ('pk', 'evaluation_mode', 'training_active', 'recommendation_threshold',\n 'nr_of_reference_things_per_thing', 'category_weight', 
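# The BOJ 15681 solution above needs sys.setrecursionlimit because its DFS
# recurses once per tree level. The same subtree-size DP can be computed
# iteratively with an explicit stack, which sidesteps the limit entirely:
def subtree_sizes(adj, root):
    n = len(adj) - 1                      # adj is 1-indexed: adj[1..n]
    nodes = [1] * (n + 1)
    parent = [0] * (n + 1)
    order, stack = [], [root]
    seen = [False] * (n + 1)
    seen[root] = True
    while stack:                          # iterative DFS, record visit order
        v = stack.pop()
        order.append(v)
        for w in adj[v]:
            if not seen[w]:
                seen[w] = True
                parent[w] = v
                stack.append(w)
    for v in reversed(order):             # accumulate children before parents
        if v != root:
            nodes[parent[v]] += nodes[v]
    return nodes

adj = [[], [2, 3], [1], [1, 4], [3]]      # tiny 4-node example, root = 1
print(subtree_sizes(adj, 1)[1:])          # [4, 1, 2, 1]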
'locality_weight', 'prediction_weight',\n 'context_weight')\n\n def get_readonly_fields(self, request, obj=None):\n return ['locality_weight', 'context_weight']\n\n\nadmin.site.register(IotRecSettings, IotRecSettingsAdmin)\n\n\n# source: https://stackoverflow.com/a/17496836\nclass IotRecUserAdmin(UserAdmin):\n form = IotRecUserChangeForm\n add_form = IotRecUserCreationForm\n inlines = [PreferencesInLine]\n list_display = UserAdmin.list_display + ('preferences_selected',)\n\n # custom calculated field to get the number of preferences per user in the list\n def preferences_selected(self, obj):\n return obj.preferences.count()\n\n preferences_selected.short_description = 'Preferences Selected'\n preferences_selected.admin_order_field = 'preferences_selected'\n\n\nadmin.site.register(User, IotRecUserAdmin)\n\n\nclass CategoryAdmin(MPTTModelAdmin):\n list_display = ('name', 'text_id', 'nr_of_items_recursive', 'things_assigned', 'ref_things_assigned',\n 'user_prefs_positive', 'user_prefs_negative', 'is_alias', 'get_alias_owner_full')\n\n def get_readonly_fields(self, request, obj=None):\n return ['nr_of_items_recursive']\n\n def get_queryset(self, request):\n return Category.objects.exclude(text_id=\"Root\")\n\n def get_alias_owner_full(self, obj):\n if obj.alias_owner is not None:\n ancestors = obj.alias_owner.get_ancestors(ascending=False, include_self=True)\n output_string = \"\"\n for a in ancestors:\n output_string += '/' + a.name\n return output_string\n\n # number of things in a category\n def things_assigned(self, obj):\n return obj.thing_set.count()\n\n things_assigned.short_description = 'Things Assigned'\n things_assigned.admin_order_field = 'things_assigned'\n\n # number of reference things in a category\n def ref_things_assigned(self, obj):\n return obj.referencething_set.count()\n\n ref_things_assigned.short_description = 'Reference Things Assigned'\n ref_things_assigned.admin_order_field = 'ref_things_assigned'\n\n # number of users that like this category\n def user_prefs_positive(self, obj):\n return obj.preferences.filter(value=1).count()\n\n user_prefs_positive.short_description = 'User Prefs +'\n user_prefs_positive.admin_order_field = 'user_prefs_positive'\n\n # number of users that dislike this category\n def user_prefs_negative(self, obj):\n return obj.preferences.filter(value=-1).count()\n\n user_prefs_negative.short_description = 'User Prefs -'\n user_prefs_negative.admin_order_field = 'user_prefs_negative'\n\n\nadmin.site.register(Category, CategoryAdmin)\n\n\nclass SelectMultipleWithDisabled(SelectMultiple):\n def create_option(self, name, value, label, selected, index, subindex=None, attrs=None):\n option = super().create_option(name, value, label, selected, index, subindex, attrs)\n\n if attrs is None:\n attrs = {}\n if label is None:\n label = {}\n option_attrs = self.build_attrs(self.attrs, attrs) if self.option_inherits_attrs else {}\n if 'selected' in label and label['selected'] is True:\n option_attrs.update(self.checked_attribute)\n if 'id' in option_attrs:\n option_attrs['id'] = self.id_for_label(option_attrs['id'], index)\n if 'disabled' in label and label['disabled'] is True:\n option_attrs['disabled'] = 'disabled'\n if 'label' in label:\n option['label'] = label['label']\n if 'selected' in label:\n option['selected'] = label['selected']\n option['attrs'] = option_attrs\n\n return option\n\n\nclass ThingAdminForm(forms.ModelForm):\n class Meta:\n model = Thing\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n self.level_indicator = 
kwargs.pop('level_indicator', u'  ')\n\n        super(ThingAdminForm, self).__init__(*args, **kwargs)\n\n        queryset = Category.objects.all()\n        mptt_opts = queryset.model._mptt_meta\n        queryset = queryset.order_by(mptt_opts.tree_id_attr, mptt_opts.left_attr)\n\n        choices = []\n        for item in queryset:\n            level = getattr(item, item._mptt_meta.level_attr)\n            value = item.text_id\n            label = mark_safe(conditional_escape(self.level_indicator) * level + smart_text(item.name))\n            if item.is_leaf_node():\n                choices.append(\n                    (value, {'selected': item in self.instance.categories.all(), 'label': label, 'disabled': False}))\n            else:\n                choices.append(\n                    (value, {'selected': item in self.instance.categories.all(), 'label': label, 'disabled': True}))\n\n        self.fields['categories'] = forms.ChoiceField(choices=choices, widget=SelectMultipleWithDisabled)\n\n\nclass BulkDeleteMixin(object):\n    class SafeDeleteQuerysetWrapper(object):\n        def __init__(self, wrapped_queryset):\n            self.wrapped_queryset = wrapped_queryset\n\n        def _safe_delete(self):\n            for obj in self.wrapped_queryset:\n                categories = set(obj.categories.all())\n                obj.delete()\n                calc_items_in_cat_list(categories)\n\n        def __getattr__(self, attr):\n            if attr == 'delete':\n                return self._safe_delete\n            else:\n                return getattr(self.wrapped_queryset, attr, None)\n\n        def __iter__(self):\n            for obj in self.wrapped_queryset:\n                yield obj\n\n        def __getitem__(self, index):\n            return self.wrapped_queryset[index]\n\n        def __len__(self):\n            return len(self.wrapped_queryset)\n\n    def get_actions(self, request):\n        actions = getattr(super(BulkDeleteMixin, self), \"get_actions\")(request)\n        actions['delete_selected'] = (BulkDeleteMixin.action_safe_bulk_delete, 'delete_selected', ugettext_lazy(\"Delete selected %(verbose_name_plural)s\"))\n        return actions\n\n    def action_safe_bulk_delete(self, request, queryset):\n        wrapped_queryset = BulkDeleteMixin.SafeDeleteQuerysetWrapper(queryset)\n        return delete_selected(self, request, wrapped_queryset)\n\n\nclass ThingAdmin(BulkDeleteMixin, admin.ModelAdmin):\n    fields = ['id', 'title', 'description', 'categories', 'type', 'ibeacon_uuid', 'ibeacon_major_id',\n              'ibeacon_minor_id', 'eddystone_namespace_id', 'eddystone_instance_id', 'scenario', 'image',\n              'indoorsLocation', 'address', 'location', 'created_at', 'updated_at']\n    list_display = ('id', 'title', 'type', 'scenario', 'ibeacon_uuid', 'ibeacon_major_id', 'ibeacon_minor_id',\n                    'eddystone_namespace_id', 'eddystone_instance_id', 'indoorsLocation', 'categories_assigned')\n    ordering = ('-created_at',)\n\n    # load custom CSS and JS for more comfortable category selection\n    class Media:\n        js = ('js/thing_admin.js',)\n        css = {\n            'all': ('css/thing_admin.css',)\n        }\n\n    def get_readonly_fields(self, request, obj=None):\n        return ['id', 'created_at', 'updated_at']\n\n    # store old/previous categories and find new categories\n    # then re-calculate the nr of items per category (for the changed one)\n    def save_related(self, request, form, formsets, change):\n        old_categories = set(form.instance.categories.all())\n        super(ThingAdmin, self).save_related(request, form, formsets, change)\n        new_categories = set(form.instance.categories.all())\n        calc_items_in_cat_list((old_categories | new_categories))\n        similarity_reference.calculate_similarity_references_per_thing(form.instance)\n\n    def delete_model(self, request, obj):\n        categories = set(obj.categories.all())\n        super(ThingAdmin, self).delete_model(request, obj)\n        calc_items_in_cat_list(categories)\n\n    # add categories counter to list\n    def categories_assigned(self, obj):\n        return 
obj.categories.count()\n\n categories_assigned.short_description = 'Categories Assigned'\n categories_assigned.admin_order_field = 'categories_assigned'\n\n\nadmin.site.register(Thing, ThingAdmin)\n\n\nclass RecommendationAdmin(admin.ModelAdmin):\n fields = ['id', 'user', 'thing', 'context', 'experiment', 'score', 'preference_score', 'context_score',\n 'invoke_rec', 'created_at', 'updated_at']\n list_display = ('id', 'created_at', 'user', 'thing', 'score', 'preference_score', 'context_score', 'experiment',\n 'invoke_rec')\n ordering = ('-created_at',)\n list_filter = ['user', 'thing', 'experiment', 'created_at', 'invoke_rec']\n\n def get_readonly_fields(self, request, obj=None):\n return ['id', 'created_at', 'updated_at', 'score', 'preference_score', 'context_score', 'invoke_rec']\n\n\nadmin.site.register(Recommendation, RecommendationAdmin)\n\n\nclass FeedbackAdmin(admin.ModelAdmin):\n fields = ['id', 'recommendation', 'value', 'created_at', 'updated_at']\n list_display = ('id', 'created_at', 'recommendation', 'value')\n ordering = ('-created_at',)\n\n def get_readonly_fields(self, request, obj=None):\n return ['id', 'created_at', 'updated_at']\n\n\nadmin.site.register(Feedback, FeedbackAdmin)\n\n\nclass RatingAdmin(admin.ModelAdmin):\n fields = ['id', 'recommendation', 'value', 'created_at', 'updated_at']\n list_display = ('id', 'created_at', 'recommendation', 'value')\n ordering = ('-created_at',)\n\n def get_readonly_fields(self, request, obj=None):\n return ['id', 'created_at', 'updated_at']\n\n\nadmin.site.register(Rating, RatingAdmin)\n\n\nclass PreferenceAdmin(admin.ModelAdmin):\n fields = ['id', 'user', 'category', 'value', 'created_at', 'updated_at']\n list_display = ('id', 'created_at', 'user', 'category', 'value')\n ordering = ('-created_at',)\n\n def get_readonly_fields(self, request, obj=None):\n return ['id', 'created_at', 'updated_at']\n\n\nadmin.site.register(Preference, PreferenceAdmin)\n\n\nclass SimilarityReferenceAdmin(admin.ModelAdmin):\n fields = ['id', 'reference_thing', 'thing', 'similarity', 'created_at', 'updated_at']\n list_display = ('id', 'reference_thing', 'thing', 'similarity')\n\n def get_readonly_fields(self, request, obj=None):\n return ['id', 'created_at', 'updated_at']\n\n\nadmin.site.register(SimilarityReference, SimilarityReferenceAdmin)\n\n\nclass StayAdmin(admin.ModelAdmin):\n fields = ['id', 'user', 'thing', 'start', 'last_checkin', 'end', 'experiment', 'created_at', 'updated_at']\n list_display = ('id', 'user', 'thing', 'start', 'last_checkin', 'end')\n list_filter = ['user', 'thing', 'experiment', 'created_at']\n\n def get_readonly_fields(self, request, obj=None):\n return ['id', 'created_at', 'updated_at']\n\n\nadmin.site.register(Stay, StayAdmin)\n\n\nclass ContextAdmin(admin.ModelAdmin):\n fields = ['id', 'weather', 'temperature_raw', 'temperature', 'length_of_trip_raw', 'length_of_trip', 'crowdedness',\n 'time_of_day', 'created_at', 'updated_at']\n list_display = ('id', 'created_at', 'weather', 'temperature', 'length_of_trip', 'crowdedness', 'time_of_day')\n list_filter = ['created_at', 'recommendation__user']\n\n def get_readonly_fields(self, request, obj=None):\n return ['id', 'created_at', 'updated_at']\n\n\nadmin.site.register(Context, ContextAdmin)\n\n\n\n","repo_name":"alex2702/iotrec-backend","sub_path":"iotrec_api/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":13517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"9782202054","text":"import 
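# The admin classes above set admin_order_field on computed columns such as
# categories_assigned. For Django to actually sort on such a column, the
# value generally has to exist in SQL, e.g. via an annotation. A sketch of how
# ThingAdmin.get_queryset could provide that -- untested against this app and
# only an assumption about its models, not the project's actual code:
from django.db.models import Count

class AnnotatedThingAdmin(ThingAdmin):
    def get_queryset(self, request):
        qs = super().get_queryset(request)
        return qs.annotate(categories_assigned=Count("categories"))

    def categories_assigned(self, obj):
        return obj.categories_assigned   # read the annotation, no extra query

    categories_assigned.admin_order_field = "categories_assigned"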
traceback\n\nfrom pyspark.sql import SparkSession\nimport findspark\nfrom pyspark.ml.feature import VectorAssembler\nfrom pyspark.ml.regression import LinearRegression, LinearRegressionModel\nfrom pyspark.ml.evaluation import RegressionEvaluator\n\nfindspark.init('D:\\Spark\\spark-3.2.3-bin-hadoop3.2')\n\n\n\ndef linear_regression_test():\n spark = SparkSession.builder \\\n .config(\"spark.jars\", \"///D:/Spark/spark-3.2.3-bin-hadoop3.2/jars/spark-sql-kafka-0-10_2.12-3.2.3.jar\" + \",\" +\n \"///D:/Spark/spark-3.2.3-bin-hadoop3.2/jars/kafka-clients-3.2.3.jar\" + \",\" +\n \"///D:/Spark/spark-3.2.3-bin-hadoop3.2/jars/commons-pool2-2.8.0.jar\" + \",\" +\n \"///D:/Spark/spark-3.2.3-bin-hadoop3.2/jars/spark-token-provider-kafka-0-10_2.12-3.2.3.jar\") \\\n .appName(\"BTC_Transactions_Fees_Prediction\") \\\n .getOrCreate()\n\n\n # Finally, we can use the trained model to make predictions on new BTC transactions\n new_transactions = spark.read.format(\"csv\") \\\n .option(\"header\", \"true\") \\\n .option(\"inferSchema\", \"true\") \\\n .load(\"BTC_1_TEST.csv\")\n\n # we need to assemble the features into a vector using VectorAssembler:\n assembler = VectorAssembler(inputCols=[\"size\", \"trans_fees_2\"], outputCol=\"features\")\n new_transactions = assembler.transform(new_transactions)\n\n # Load the saved model\n saved_trained_model = LinearRegressionModel.load(\n \"E:/CSUF/Spring 2023/531 Adv Database/Projects/Spark-Streaming-BTC/TRAINED_MODELS/regression_model\")\n\n predictions = saved_trained_model.transform(new_transactions)\n\n predictions = predictions.toPandas()\n predictions.to_excel('BTC_Transaction_PREDICTION_test.xlsx', sheet_name='Sheet1', index=True)\n\n\nif __name__ == '__main__':\n\n try:\n linear_regression_test()\n\n except Exception as e:\n traceback.print_exc()\n exit(\"Error in Spark App\")\n","repo_name":"tejs13/Spark-Streaming-BTC","sub_path":"LinearRegressionTest.py","file_name":"LinearRegressionTest.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"20265949745","text":"class Human():\n def __init__(self, id: str, full_name: str, age: int, priority: bool, blood_type: str) -> None:\n self.id = id\n self.full_name = full_name\n self.age = age\n self.priority = priority\n self.blood_type = blood_type\n self.family = []\n\n def __str__(self) -> str:\n return (self.__repr__())\n\n def __repr__(self) -> str:\n return (f\"(ID: {self.id} Name: {self.full_name:>20} Age:{self.age} {self.priority} {len(self.family)}\")\n\n def add_family_member(self, person):\n self.family.append(person)\n person.family = self.family\n\n\nclass Queue():\n def __init__(self) -> None:\n self.humans = []\n\n def add_person(self, person):\n if person.age > 60:\n print(self.humans)\n self.humans = [person]+self.humans[::]\n print(self.humans)\n else:\n self.humans = self.humans[::]+[person]\n\n def find_in_queue(self, person):\n for k, p in enumerate(self.humans):\n if p.full_name == person:\n return self.humans[k]\n print(\"Ther is no this person in queue\")\n return None\n\n def swap(self, person1, person2):\n if len(self.humans) > 1:\n k1 = [k for k, v in enumerate(self.humans) if v in [\n person1, person2]]\n if len(k1) == 2:\n t = self.humans[k1[0]]\n self.humans[k1[0]] = self.humans[k1[1]]\n self.humans[k1[1]] = t\n\n def get_next(self):\n t = self.humans[0]\n self.humans = self.humans[1:]\n return t\n\n def get_next_blood_type(self, blood_type):\n k1 = [k for k, v in enumerate(\n self.humans) 
if v.blood_type == blood_type]\n t = self.humans[k1[0]]\n self.humans = self.humans[0:k1[0]]+self.humans[k1[0]+1:]\n return t\n\n def sort_by_age(self):\n weight_list = [p.age+p.priority*1000 for p in self.humans]\n for a in range(len(weight_list)):\n for b in range(a, len(weight_list)):\n if weight_list[a] < weight_list[b]:\n t = weight_list[a]\n weight_list[a] = weight_list[b]\n weight_list[b] = t\n t = self.humans[a]\n self.humans[a] = self.humans[b]\n self.humans[b] = t\n\n def rearange_queue(self):\n pass\n\n\nh1 = Human('123456', 'Mikle Porot', 20, False, 'A')\nh2 = Human('123457', 'Jone Smith', 10, False, 'A')\nh3 = Human('123458', 'Petr Semenov', 70, True, 'AB')\nh4 = Human('123459', 'Marina Tah', 25, False, 'AB')\nh5 = Human('123459', 'Juriy Tah', 26, False, 'B')\nh6 = Human('123459', 'Pavel Tah', 12, False, 'A')\nh_list = [h1, h2, h3, h4, h5, h6]\n\nq = Queue()\nfor p in h_list:\n q.add_person(p)\nprint(q.humans)\n\nprint(q.find_in_queue(\"Mikle Pddorot\"))\nq.swap(h3, h1)\nprint(q.humans)\nprint(\"Next: \", q.get_next())\nprint(q.humans)\nprint(\"Blood: \", q.get_next_blood_type('AB'))\nprint(q.humans)\n\nh11 = Human('123456', '1Mikle Porot', 21, False, 'A')\nh12 = Human('123457', '2Jone Smith', 14, False, 'A')\nh13 = Human('123458', '3Petr Semenov', 72, False, 'AB')\nh14 = Human('123459', '4Marina Tah', 24, True, 'AB')\nh15 = Human('123459', '5Juriy Tah', 28, False, 'B')\nh16 = Human('123459', '6Pavel Tah', 16, False, 'A')\n\nh13.add_family_member(h11)\nh13.add_family_member(h12)\nh13.add_family_member(h15)\n\nh_list = [h11, h12, h13, h14, h15, h16]\nfor p in h_list:\n q.add_person(p)\n\nq.sort_by_age()\nfor p in q.humans:\n print(p)\n","repo_name":"Dimus73/00-di-learning","sub_path":"03-week-03/mini_project_vaccines/vaccines.py","file_name":"vaccines.py","file_ext":"py","file_size_in_byte":3520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"22070945192","text":"import torch\nfrom torch import nn\n\n\nclass LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n 
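# The hand-written Queue above re-sorts self.humans using a weight of
# age + priority*1000. The standard-library alternative is heapq, which keeps
# the highest-weight person retrievable in O(log n) per operation (weights are
# negated because heapq is a min-heap). The names below are illustrative only:
import heapq

queue = []
for name, age, priority in [("Mikle", 20, False), ("Petr", 70, True),
                            ("Jone", 10, False)]:
    weight = age + priority * 1000
    heapq.heappush(queue, (-weight, name))

while queue:
    weight, name = heapq.heappop(queue)
    print(name, -weight)          # Petr 1070, then Mikle 20, then Jone 10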
m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)\n","repo_name":"NVlabs/affordance_diffusion","sub_path":"ldm/modules/ema.py","file_name":"ema.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"4"} +{"seq_id":"11357953213","text":"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', include('core.urls')),\n path('dashboard/expenses/', include('expense.urls')),\n path('dashboard/income/', include('income.urls')),\n path('auth/', include('user_authentication.urls')),\n path('account/', include('user_preferences.urls')),\n path('api/', include('api_app.urls'))\n\n]\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nhandler404='core.views.error_404'","repo_name":"Sidney2022/Income-Expense-Tracker","sub_path":"income_expense_tracker/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"73740379994","text":"#!/usr/bin/env python\nfrom __future__ import print_function, division\n\nimport sys\nimport os\nimport argparse\nimport subprocess\nimport time\nimport json\nimport socket\nimport re\n\nimport config\n\nansi_escape = re.compile(r'\\x1b[^m]*m')\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Gather performance numbers for Delite apps.\")\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\")\n parser.add_argument(\"-f\", \"--force\", action=\"store_true\",\n help=\"force execution even if the git repository contains uncommitted changes\")\n parser.add_argument(\"-r\", \"--runs\", type=int, default=\"5\",\n help=\"number of times to run the apps\")\n parser.add_argument(\"-p\", \"--filtered-runs\", type=int, default=\"2\",\n help=\"filter the first this many runs\")\n parser.add_argument(\"-d\", \"--directory\", type=str, default=\"/kunle/ppl/delite/benchmark/times\",\n help=\"directory to write the raw output into\")\n parser.add_argument(\"-j\", \"--json-directory\", type=str, default=\"/kunle/ppl/delite/benchmark/json\",\n help=\"directory to write the json output into\")\n parser.add_argument(\"-s\", \"--skip-runs\", action=\"store_true\",\n help=\"skip all process calls\")\n parser.add_argument(\"apps\", type=str, nargs=\"*\", default=config.default_apps, help=\"apps to run\")\n\n args = parser.parse_args()\n\n # collect the apps to run\n apps = []\n dsls = []\n for a in 
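# LitEma above maintains shadow copies of model parameters with an exponential
# moving average plus the warm-up schedule decay = min(decay, (1+n)/(10+n)).
# Stripped of torch, the per-step arithmetic is just this:
def ema_update(shadow, param, step, max_decay=0.9999):
    decay = min(max_decay, (1 + step) / (10 + step))
    return shadow - (1 - decay) * (shadow - param)

shadow = 0.0
for step in range(1, 6):          # pretend the live parameter sits at 1.0
    shadow = ema_update(shadow, 1.0, step)
print(round(shadow, 4))           # creeps toward 1.0, quickly at first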
args.apps:\n if (a not in config.apps):\n print(\"error: app {0} not found in config file\".format(a), file=sys.stderr)\n exit(-1)\n app = config.apps[a]\n if(app.dsl not in dsls):\n dsls.append(app.dsl)\n if(args.verbose):\n print(\"notice: identified dsl {0}\".format(app.dsl.name), file=sys.stderr)\n apps.append(app)\n if(args.verbose):\n print(\"notice: identified app {0}\".format(app.name), file=sys.stderr)\n\n # chdir to the hyperdsl root directory\n hyperdsl_root = chdir_hyperdsl_root()\n\n # check that there are no changes to the repository\n git_status = subprocess.check_output(\"git status -s\", shell=True)\n if((not args.force) and (git_status != \"\")):\n print(\"error: hyperdsl repository contains uncommitted changes\", file=sys.stderr)\n print(\" commit these changes before running benchmark.py\", file=sys.stderr)\n exit(-1)\n\n # identify the hash associated with the current branch\n git_hash = subprocess.check_output(\"git rev-parse --short HEAD\", shell=True).strip()\n if(args.verbose):\n print(\"notice: identified git hash {0}\".format(git_hash), file=sys.stderr)\n\n if(args.verbose):\n print(\"notice: creating directory for experimental results\", file=sys.stderr)\n subprocess.check_call(\"rm -f {0}/latest\".format(args.directory), shell=True)\n subprocess.check_call(\"mkdir -p {0}/{1}\".format(args.directory, git_hash), shell=True)\n subprocess.check_call(\"ln -s {0} {1}/latest\".format(git_hash, args.directory), shell=True)\n\n output_json = {}\n output_json[\"git_hash\"] = git_hash\n output_json[\"start_time\"] = time.time()\n output_json[\"host\"] = socket.gethostname()\n output_json[\"dsls\"] = {}\n output_json[\"apps\"] = {}\n output_json[\"app_names\"] = []\n\n if(args.verbose):\n print(\"notice: publishing forge dsls\", file=sys.stderr)\n for dsl in dsls:\n if dsl.needs_publish:\n if(args.verbose):\n print(\"notice: publishing {0}\".format(dsl.name), file=sys.stderr)\n output_json[\"dsls\"][dsl.name] = json_call(dsl.publish_command, \n \"{0}/{1}/{2}.publish\".format(args.directory, git_hash, dsl.name), args.skip_runs)\n\n if(args.verbose):\n print(\"notice: running apps\", file=sys.stderr)\n for app in apps:\n output_json[\"app_names\"].append(app.name)\n output_json[\"apps\"][app.name] = {}\n if(args.verbose):\n print(\"notice: staging {0}\".format(app.name), file=sys.stderr)\n os.chdir(app.dsl.run_dir)\n output_json[\"apps\"][app.name][\"stage\"] = json_call(app.stage_command(), \n \"{0}/{1}/{2}.delitec\".format(args.directory, git_hash, app.name), args.skip_runs)\n output_json[\"apps\"][app.name][\"configs\"] = []\n output_json[\"apps\"][app.name][\"runs\"] = {}\n for c in app.configs:\n output_json[\"apps\"][app.name][\"configs\"].append(c.name)\n if(args.verbose):\n print(\"notice: running {0} under configuration {1}\".format(app.name, c.name), file=sys.stderr)\n opts = \" -Dstats.dump -Dstats.dump.component=app -Dstats.dump.overwrite -Dstats.output.dir={0}/{1} -Dstats.output.filename={2}-{3}.times {4}\".format(\n args.directory, git_hash, app.name, c.name, os.getenv(\"JAVA_OPTS\", \"\"))\n os.putenv(\"JAVA_OPTS\", opts)\n output_json[\"apps\"][app.name][\"runs\"][c.name] = json_call(app.run_command(c, args.runs, args.verbose),\n \"{0}/{1}/{2}-{3}.delite\".format(args.directory, git_hash, app.name, c.name), args.skip_runs)\n output_json[\"apps\"][app.name][\"runs\"][c.name][\"opts\"] = opts\n cafn = \"{0}/{1}/{2}-{3}.times\".format(args.directory, git_hash, app.name, c.name)\n if(os.path.isfile(cafn)):\n with open(cafn, \"r\") as ftimes:\n raw_times = 
[float(t)*1e-6 for t in ftimes.read().strip().split(\"\\n\")]\n output_json[\"apps\"][app.name][\"runs\"][c.name][\"raw_times\"] = raw_times\n if (c.run_only_once):\n if(len(raw_times) == 1):\n output_json[\"apps\"][app.name][\"runs\"][c.name][\"filtered_times\"] = raw_times\n output_json[\"apps\"][app.name][\"runs\"][c.name][\"avg_time\"] = raw_times[0]\n else:\n output_json[\"apps\"][app.name][\"runs\"][c.name][\"filtered_times\"] = []\n output_json[\"apps\"][app.name][\"runs\"][c.name][\"avg_time\"] = 0.0\n else:\n if(len(raw_times) == args.runs):\n filtered_times = raw_times[args.filtered_runs:]\n output_json[\"apps\"][app.name][\"runs\"][c.name][\"filtered_times\"] = filtered_times\n output_json[\"apps\"][app.name][\"runs\"][c.name][\"avg_time\"] = sum(filtered_times)/(len(filtered_times) + 1e-60)\n else:\n output_json[\"apps\"][app.name][\"runs\"][c.name][\"filtered_times\"] = []\n output_json[\"apps\"][app.name][\"runs\"][c.name][\"avg_time\"] = 0.0\n else:\n output_json[\"apps\"][app.name][\"runs\"][c.name][\"raw_times\"] = []\n output_json[\"apps\"][app.name][\"runs\"][c.name][\"filtered_times\"] = []\n output_json[\"apps\"][app.name][\"runs\"][c.name][\"avg_time\"] = 0.0\n os.chdir(hyperdsl_root)\n\n output_json[\"end_time\"] = time.time()\n output_json[\"total_time\"] = output_json[\"end_time\"] - output_json[\"start_time\"]\n if(args.verbose):\n print(\"notice: ran for {0} seconds\".format(output_json[\"total_time\"]), file=sys.stderr)\n json.dump(output_json, open(\"{0}/{1}.json\".format(args.json_directory, git_hash), \"w\"))\n\n\n\ndef chdir_hyperdsl_root():\n # first, change the current working directory to the hyperdsl root\n hyperdsl_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n if(os.path.abspath(os.getcwd()) != hyperdsl_root):\n print(\"warning: benchmark.py not invoked from root of hyperdsl directory.\", file=sys.stderr)\n print(\" attempting to cd to [{0}]\".format(hyperdsl_root), file=sys.stderr)\n os.chdir(hyperdsl_root)\n if (os.path.abspath(os.getcwd()) != hyperdsl_root):\n print(\"error: unable to cd to hyperdsl root directory.\", file=sys.stderr)\n exit(-1)\n print(\" directory changed successfully!\", file=sys.stderr)\n\n # check that the root of the git repo is equal to the root of the hyperdsl repo\n try:\n git_root = subprocess.check_output(\"git rev-parse --show-toplevel\", shell=True).strip()\n if(git_root != hyperdsl_root):\n print(\"error: git root in unexpected location\", file=sys.stderr)\n exit(-1)\n except subprocess.CalledProcessError as e:\n print(\"error: unable to call git.\", file=sys.stderr)\n exit(-1)\n\n return hyperdsl_root\n\n\ndef json_call(command, file_pfx, skip_runs):\n rv = {}\n rv[\"command\"] = command\n if(not skip_runs):\n subprocess.call(command, stdout=open(file_pfx + \".out\", \"w\"), stderr=open(file_pfx + \".err\", \"w\"), shell=True)\n rv[\"out\"] = ansi_escape.sub(\"\", open(file_pfx + \".out\").read())\n rv[\"err\"] = ansi_escape.sub(\"\", open(file_pfx + \".err\").read())\n return rv\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"das-projects/Grothendieck","sub_path":"benchmark/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":8112,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"} +{"seq_id":"37705260546","text":"import pytest\nfrom System.weighted_picker import WeightedPicker\nfrom uno.deck import Deck\nimport copy\n\ndef pick_card():\n deck = Deck({\"number\": 3, \"special\": 10, \"wild\": 0}).stack\n picker = 
WeightedPicker({\"number\":2,\"special\":3})\n \n return picker.pick_card(deck)\n \ndef get_err():\n number = 0\n special = 0\n for _ in range(1000):\n if pick_card().card_type.is_number():\n number += 1\n else :\n special += 1\n\n print(\"숫자 카드: \" + str(number))\n print(\"기술 카드: \" + str(special))\n # print(\"오차: \" + str((400 - number)/4) + \" %\")\n \n return abs((400 - number)/4)\n \ndef test_answer():\n assert get_err() <= 5 \n \ntest_answer()\n","repo_name":"SSM-and-etc/Uno_SE","sub_path":"Script/test_picker.py","file_name":"test_picker.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"50"} +{"seq_id":"38212035029","text":"from fluxify.exceptions import ArgumentNotFoundException, InvalidArgumentException\n\n\ndef boolean(transformation):\n if not 'value' in transformation:\n raise ArgumentNotFoundException('[transformation][replace] \"value\" argument was not found in transformation mapping.')\n\n if type(transformation['value']) != str:\n raise InvalidArgumentException('[transformation][replace] \"value\" argument must be a string for replace')\n\n value = str(transformation['value']).lower()\n if type(value) == str and value == 'true':\n value = True\n\n if type(value) == str and value == 'false':\n value = False\n\n if type(value) == str and value.isdigit():\n value = int(value)\n\n return bool(value)\n","repo_name":"0xIbra/fluxify","sub_path":"fluxify/transformers/boolean.py","file_name":"boolean.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"} +{"seq_id":"2217612974","text":"import csv\nimport re\nimport uuid\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom csv_utils import CsvUtils\nfrom question_from_site import QuestionFromSite\n\nHEADERS = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36 OPR/82.0.4227.33',\n 'accept': '*/*'\n}\n\n\ndef get_html(url, params=None):\n r = requests.get(\n url=url,\n headers=HEADERS,\n params=params\n )\n return r\n\n\ndef get_list_pages_count(html):\n soup = BeautifulSoup(html, 'html.parser')\n pagination_items = soup.findAll('div', class_='pagination')\n return int(pagination_items[0].findAll('a')[-1].text)\n\n\ndef get_details_pages_count(html):\n soup = BeautifulSoup(html, 'html.parser')\n pagination_items = soup.findAll('div', class_='pagination')\n return int(pagination_items[0].findAll('a')[-1].text)\n\n\ndef get_links(html):\n soup = BeautifulSoup(html, 'html.parser')\n\n try:\n items = soup.findAll('ul', class_='topiclist topics')[1].findAll('li')\n links_list = []\n\n for item in items:\n links_list.append({\n 'id': \"\",\n 'title': item.find('dt').find('a').text,\n 'question': \"\",\n 'link': item.find('dt').find('a').get('href'),\n 'createdAt': re.search(\"» (.*?)\\r\\n\", item.find('dt').find('div', class_='h599').text).group(1),\n 'updatedAt': re.search(\"\\n (.*?)\", item.find('dd', class_='lastpost').find('span').text).group(1),\n 'posts': item.find('dd', class_='posts').text,\n 'views': item.find('dd', class_='views').text,\n 'answers': \"\"\n })\n\n del links_list[0]\n return links_list\n except Exception:\n print('Error')\n\n\ndef get_details(html):\n soup = BeautifulSoup(html, 'html.parser')\n\n index = 0\n\n try:\n question_from_site = QuestionFromSite(\n str(uuid.uuid4()),\n \"\",\n \"\",\n \"\",\n \"\",\n []\n )\n\n items = soup.find('div', 
id='page-body').findAll('div', class_='postbody')\n\n for item in items:\n text_list = item.find('div', class_='content').text\n if index == 0:\n question_from_site.question = text_list\n else:\n question_from_site.answers_list.append(text_list)\n index += 1\n\n return question_from_site\n except Exception:\n print('Error')\n\n\ndef parse_pages(base_url, additional_url, end_url, filename):\n html = get_html(base_url + additional_url + end_url)\n\n if html.status_code == 200:\n items_to_save = []\n\n pages_count = get_list_pages_count(html.text)\n print(f'processing page: {0} total: {pages_count}')\n current_links = get_links(html.text)\n for current_link in current_links:\n current_url = current_link['link']\n details_html = get_html(current_url)\n details = get_details(details_html.text)\n current_link['id'] = details.question_id\n current_link['question'] = details.question\n current_link['answers'] = details.answers_list.__str__()\n\n items_to_save.extend(current_links)\n\n for page in range(1, pages_count - 1):\n print(f'processing page: {page} total: {pages_count}')\n html = get_html(f'{base_url}{additional_url}-{page*25}{end_url}')\n current_links = get_links(html.text)\n for current_link in current_links:\n current_url = current_link['link']\n details_html = get_html(current_url)\n details = get_details(details_html.text)\n current_link['id'] = details.question_id\n current_link['question'] = details.question\n current_link['answers'] = details.answers_list.__str__()\n\n items_to_save.extend(current_links)\n\n print(f'received {len(items_to_save)} objects')\n CsvUtils.save_question_from_site(items_to_save, filename)\n return items_to_save\n else:\n print(\"Error!\")\n\n\ndef parse_details(url):\n html = get_html(url)\n\n if html.status_code == 200:\n # pages_count = get_details_pages_count(html.text)\n # print(pages_count)\n details = get_details(html.text)\n print(details)\n","repo_name":"ruIlyaKiselev/diplom","sub_path":"scrapper.py","file_name":"scrapper.py","file_ext":"py","file_size_in_byte":4388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"73932182555","text":"#!python\n#\n# Code 2023 by Jamel Simpson\n\n\"\"\" This script is made for the automated creation of chimeric proteins between one designated protein called a reference protein,\nand a designated section within the reference protein sequence, called sequence_of_interest being replaced by homologous sequences outlined in a list\naccession number sequences in a protein_info file\"\"\"\n\nfrom json import load\nfrom sys import argv\nimport ChimeraGenerator\nimport AccessiontoAlignment\n\n# 3 command line inputs are required for this production script, a .tsv file with accession numbers in column one\n# and the corresponding name you want associated with that sequence, these name will be used throughout all naming\n# including:fasta file names, alphafold out folders, in file fasta identifiers, plddt data files and so on\n# DO NOT INCLUDE YOUR REFERENCE PROTEIN IN THIS FILE\nprotein_info = str(argv[1])\n# The second is a chimera_arguments.json file modified to your liking, this file has naming conventions for all important\n# files generated like fasta and msas, for analysis later keep naming conventions consistent, the naming works by replacing the\n# character_to_replace which is defaulted with an asterisk ('*'), with the names supplied in the protein_info list that was generated\n# for the last input, there are also a couple operation that come as arrays 
enclosed in [], and the first input is just\n# quotations \"\"; if you place a # within the quotes it will prevent that file from being created. For example, you can prevent\n# the msa from being calculated by the script like this: \"muscle_command_for_msa\": [\"#\",\"module load gcc/9.2.0 && module load muscle/3.8.31 && muscle\"]\n# PROVIDE YOUR REFERENCE IN THE JSON INPUT FILE\nargument_json = str(argv[2])\n# Lastly and most simply, create a fasta file with the section of your reference protein that you want spliced out and replaced\nsequence_of_interest_fasta = argv[3]\n# These lines extract all the inputs and info outlined in the previous comments\nwith open(sequence_of_interest_fasta, 'r') as fasta:\n    sequence_of_interest = ''.join([x for x in fasta if x[0] != '>' if x != '']).strip().replace('\\n', '')\nwith open(argument_json, 'rb') as jfile:\n    argument_dict = load(jfile)[\"arguments\"]\n# In your protein_info file,\nwith open(protein_info, 'r') as info_list:\n    info_list = info_list.readlines()\n# column 1: accession numbers\n    accession_number = [x.split()[0] for x in info_list]\n# column 2: naming conventions per accession number sequence\n    protein_list = [x.split()[-1] for x in info_list]\n# Here, important information from your command line inputs is turned into list iterables to be used in the map function later\nsequence_of_interest = [sequence_of_interest for x in protein_list]\nreference_protein = [argument_dict['reference_protein'] for x in protein_list]\nmsa_file = [argument_dict['msa_file_name'] for x in protein_list]\ncharacter_to_replace = argument_dict['character_to_replace']\nsubunits = [argument_dict['number_of_subunits'] for x in protein_list]\nemail = [argument_dict['email_for_accession'] for x in protein_list]\nmonomer_fastas = [argument_dict['monomer_fasta'].replace(character_to_replace, protein) for protein in protein_list]\nmultimer_fastas = [argument_dict['multimer_fasta'].replace(character_to_replace, protein) for protein in protein_list]\nchimera_fastas = [argument_dict['chimera_fastas'].replace(character_to_replace, protein) for protein in protein_list]\nmsa_fasta = argument_dict['msa_fasta']\nreference_protein_name = [argument_dict['reference_protein_fasta_identifier'] for protein in protein_list]\n# Here it is determined whether you require multimer files for your protein; monomer files are created by default\n# no matter what, for alignment purposes\nif subunits == 1:\n    list(map(AccessiontoAlignment.accession_to_fasta, monomer_fastas, accession_number, email, subunits))\nelse:\n    list(map(AccessiontoAlignment.accession_to_fasta, monomer_fastas, accession_number, email, subunits, multimer_fastas))\n# This is the code enacting the msa creation switch that's toggled by putting a # in the json file, as outlined in the above comments\nif argument_dict['muscle_command_for_msa'][0] == '':\n    AccessiontoAlignment.multiple_sequence_alignment(monomer_fastas, msa_fasta,\n                                                     msa_file[0],\n                                                     reference_protein[0], argument_dict['muscle_command_for_msa'][1])\n# Here splicing information is collected: the boundaries containing the sequence outlined in sequence_of_interest_fasta for your reference\n# protein, and the sequence from your partner proteins that aligns with the sequence_of_interest in the msa\nsplice_info = list(map(AccessiontoAlignment.alignment_finder, msa_file,\n                       sequence_of_interest, protein_list, reference_protein_name))\nspliced_comparison_sequence = [x[0] for x in splice_info]\nreference_splice_boundaries = [x[2] for x in splice_info]\n# The 
part of the reference sequence that remains after splicing out the sequence_of_interest is created and marked for splicing\n# with the homologous sequence section\nreference_sequence_included = [x[1] for x in map(ChimeraGenerator.sequence_splice,\n                                                 reference_protein,\n                                                 reference_splice_boundaries)]\n# The 2 sections from the reference and splice partner are combined and put into a fasta file\nchimera_sequences = list(map(ChimeraGenerator.chimera_sequence_creation,\n                             spliced_comparison_sequence,\n                             reference_sequence_included))\nlist(map(ChimeraGenerator.fasta_creation, chimera_fastas, chimera_sequences, subunits))\n# This is an optional file that holds a list of all the fasta files that were created, chimeric or otherwise.\n# This list can be useful when inputting fasta files to alphafold, especially if you're scripting slurm jobs. This file's\n# creation can be turned on by removing the # from within the quotes in the json file on the dictionary key \"fasta_file_list_name\"\nif argument_dict['fasta_file_list_name'][0] != '#':\n    if subunits == 1:\n        fasta_list = monomer_fastas + chimera_fastas\n    else:\n        fasta_list = multimer_fastas + chimera_fastas\n    with open(argument_dict['fasta_file_list_name'][1], 'w') as fasta_list_file:\n        for fasta in fasta_list:\n            fasta_list_file.write(f'{fasta}\\n')\n","repo_name":"kassonlab/Jamel","sub_path":"ProductionScript.py","file_name":"ProductionScript.py","file_ext":"py","file_size_in_byte":6495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"20526917740","text":"import sympy\r\nimport math\r\nfrom sympy.abc import x, y\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n# Change font type and font size in axis labels\r\nmatplotlib.rcParams.update({'legend.markerscale': 1.5, 'legend.handlelength': 1, 'legend.frameon': 1, 'legend.handletextpad': 1, 'font.size': 18, 'font.family': 'Times New Roman'})\r\n\r\nmatplotlib.rcParams['text.usetex'] = True\r\n#matplotlib.rcParams['text.latex.unicode'] = True\r\n#matplotlib.rcParams['mathtext.fontset'] = 'stix'\r\n#matplotlib.rcParams['font.family'] = 'STIXGeneral'\r\n\r\ndef plot_streamlines(ax, u, v, xlim=(-2, 6), ylim=(-2, 6)):\r\n    x0, x1 = xlim\r\n    y0, y1 = ylim\r\n    x = np.arange(0, 10)\r\n    y = np.arange(0, 10)\r\n    X, Y = np.meshgrid(x, y)\r\n    u = np.ones((10, 10))\r\n    v = np.ones((10, 10))\r\n    ax.streamplot(X, Y, u, v, color='black', density=2, arrowsize=1, arrowstyle='->', broken_streamlines=False)\r\n\r\ndef format_axes(ax):\r\n    ax.set_aspect('equal')\r\n    ax.figure.subplots_adjust(bottom=0, top=1, left=0, right=1)\r\n    ax.xaxis.set_ticks([])\r\n    ax.yaxis.set_ticks([])\r\n    # dict.itervalues() is Python 2 only; use .values() under Python 3\r\n    for spine in ax.spines.values():\r\n        spine.set_visible(False)\r\n\r\nxlim = ylim = (-2, 2)\r\nfig, ax = plt.subplots(figsize=(10, 10))\r\nplt.grid(True)\r\nplt.xlabel(r'$x_1$', fontname=\"Times New Roman\", fontsize=24)\r\nplt.ylabel(r'$x_2$', fontname=\"Times New Roman\", fontsize=24)\r\nplt.title(r'Parallel flow at an angle $\\alpha = 45^{\\circ}$')\r\nplot_streamlines(ax, (-4.99, 4.99), (-4.99, 4.97))\r\n\r\ntz1 = plt.Circle((0,0), radius=0.03, facecolor='black', edgecolor='black')\r\nax.add_patch(tz1)\r\n\r\n# Get current figure\r\nfigure = plt.gcf()\r\n# Set figure size\r\n#figure.set_size_inches(5, 5)\r\n# When saving, specify the DPI\r\nplt.savefig('zad_2_1_streamlines.pdf', bbox_inches='tight')\r\n# Show figure\r\nplt.show()\r\n\r\nformat_axes(ax)
\r\n","repo_name":"cimbaIG/Python","sub_path":"MF2_Vjezbe_2_Potential_flow_Python/zad_2_1_streamplot.py","file_name":"zad_2_1_streamplot.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"28265305379","text":"import tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.training import training_util\nfrom tensorflow.python.training.session_run_hook import SessionRunArgs\n\nclass CustomCheckpointSaverHook(tf.train.CheckpointSaverHook):\n \"\"\"Because I don't want to use the default CheckpointSaverHook behavior, which\n appends the full graph definition in the summary events file every checkpoint, \n I'm writing a custom sub-class which overrides the offending function.\"\"\"\n\n def __init__(self, checkpoint_dir, save_secs=None, save_steps=None, saver=None,\n checkpoint_basename=\"model.ckpt\", scaffold=None, listeners=None):\n super().__init__(checkpoint_dir, save_secs=save_secs, save_steps=save_steps,\n saver=saver, checkpoint_basename=checkpoint_basename,\n scaffold=scaffold, listeners=listeners)\n\n def before_run(self, run_context):\n \"\"\"Essentially a copy of before_run as defined in the base class, except we\n don't add the default graph or any meta-graph data to the SummaryWriter\"\"\"\n if self._timer.last_triggered_step() is None:\n training_util.write_graph(\n ops.get_default_graph().as_graph_def(add_shapes=True),\n self._checkpoint_dir,\n \"graph.pbtxt\")\n saver_def = self._get_saver().saver_def if self._get_saver() else None\n\n return SessionRunArgs(self._global_step_tensor)","repo_name":"smeenehan/TensorFlowProjects","sub_path":"CIFAR/custom_session_run_hooks.py","file_name":"custom_session_run_hooks.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"30506955046","text":"import tkinter as tk\nfrom tkinter.simpledialog import askfloat\nimport math\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image, ImageTk, ImageOps\nimport tkinter.font\nimport argparse\nimport os\nimport glob\nimport sys\nfrom pathlib import Path\nfrom fastcore.script import *\nimport re\nfrom espiownage.core import *\nfrom itertools import cycle\nfrom collections import defaultdict\nimport matplotlib.pyplot as plt\n\n\n\ndef increment(num, digits,step):\n # return the first part of match as is, Return the 2nd match + 1 which is 'x + 1'\n return num.group(1) + str(int(num.group(2)) + step).zfill(digits)\n\ndef get_next_img(meta_file, step=1, img_bank='images/'):\n \"Increments the number of the image filename and tries to load it\"\n digits = len(Path(meta_file).stem.split('_')[-1]) # num digits in ending number part of the name\n next_meta = re.sub('(proc_)([0-9]{'+str(digits)+'})', lambda m: increment(m,digits,step), meta_file)\n next_img_path = meta_to_img_path(next_meta, img_bank=img_bank)\n if os.path.exists(str(next_img_path)):\n return ImageTk.PhotoImage(image=Image.open(next_img_path)), str(next_img_path)\n blank_img = Image.new('RGB', (512, 384), (255, 255, 255))\n return ImageTk.PhotoImage(image=blank_img), 'None'\n\n\n\ndef poly_oval(cx, cy, a, b, angle=0, steps=100 ):\n # From https://mail.python.org/pipermail/python-list/2000-December/022013.html\n \"\"\"return an oval as coordinates suitable for create_polygon\"\"\"\n\n # angle is in degrees anti-clockwise, convert to radians\n rotation = angle * math.pi / 180.0 # overall angle of the 
ellipse\n\n point_list = []\n\n # create the oval as a list of points\n for i in range(steps):\n\n # Calculate the angle for this step: 360 degrees == 2 pi radians\n theta = (math.pi * 2) * (float(i) / steps) # theta is for points drawing the circumference of the ellipse\n x1, y1 = a * math.cos(theta), b * math.sin(theta)\n\n # rotate x, y\n x = (x1 * math.cos(rotation)) + (y1 * math.sin(rotation))\n y = (y1 * math.cos(rotation)) - (x1 * math.sin(rotation))\n\n point_list.append((x + cx))\n point_list.append((y + cy))\n\n return point_list\n\n\ndef clean_pandas_list(list_):\n list_ = list_.replace(', ', '\",\"')\n list_ = list_.replace('[', '[\"')\n list_ = list_.replace(']', '\"]')\n return list_\n\n\ndef interleave_lists(list1:list, list2:list):\n \"alternate between 1 and 2; if they're of different lengths, it keeps going with whatever's left in the longer one\"\n return [x for both in zip(cycle(list1), list2) for x in both]\n\ndef dedup_list(li):\n \"remove duplicates while preserving order of first dup(non-dup?/orig?). cf. https://stackoverflow.com/a/480227/4259243\"\n seen = set()\n seen_add = seen.add\n return [x for x in li if not (x in seen or seen_add(x))]\n\n\ndef get_top_loss_list(\n top_losses_dir, # directory where top-losses info is stored\n ):\n \"gets list of filenames with top losses for use with preferential ordering\"\n if (top_losses_dir==None) or (not os.path.exists(top_losses_dir)): return []\n # slurp in any csv files we've got\n # note they likely have different columns, and the loss values are not comparable\n tldir_files_list = glob.glob(top_losses_dir+'/*.csv') # files in tl directory\n top_loss_list = []\n for tldir_file in tldir_files_list:\n #print(\"top losses tldir_file = \",tldir_file)\n df = pd.read_csv(tldir_file)\n fnames = dedup_list(list(df[\"filename\"]))\n if [] == top_loss_list: top_loss_list = fnames\n else: top_loss_list = interleave_lists(top_loss_list, fnames)\n return dedup_list(top_loss_list)\n\n\nclass EllipseEditor(tk.Frame):\n '''Edit ellipses for steelpan images'''\n\n def __init__(self, parent, # tk class and parent window\n meta_file_list, # list of csv files to edi\n img_bank='images/', # where images are stored\n tldir=None, # directory where top-losses info is stored\n seq=True, # ignore top losses and do sequential selection of frames (per existing annotations)\n ):\n tk.Frame.__init__(self, parent)\n self.meta_file_list = meta_file_list\n self.img_bank = img_bank\n self.top_loss_list = get_top_loss_list(tldir)\n if self.top_loss_list == []: seq = True\n if not seq: self.meta_file_list = combine_file_and_tl_lists(self.meta_file_list, self.top_loss_list)\n self.seq = seq\n\n # create a canvas\n self.width, self.height = 512, 384 # size of images\n self.fps = 15037\n self.y0 = 0# self.height # y offset for all operations\n self.readout = 700 # width for additional annotation text\n self.canvas = tk.Canvas(width=self.width + self.readout, height=20+2*self.height )\n self.canvas.pack(fill=\"both\", expand=True)\n\n self.file_index = 0 # TODO: change to grab from top losses\n self.meta_file = meta_file_list[self.file_index]\n self.img_file = str(meta_to_img_path(self.meta_file, img_bank=self.img_bank))\n\n self.mask_pred_file = ''\n self.mask_img = None\n self.showing_mask = True\n\n self.bbox_pred_file = 'top_losses/bboxes_top_losses_real.csv' # TODO: this is fragile\n self.showing_bboxes = True\n self.bbox_list = []\n self.bbox_df = pd.read_csv(self.bbox_pred_file, converters={'bblist': eval})\n 
self.bbox_df.apply(clean_pandas_list)\n\n        self.segreg_volume_file = 'segreg_volume_f16.npy'\n        self.segreg_volume = []\n        if os.path.exists(self.segreg_volume_file):\n            print(\"Reading seg-reg volume data file \",self.segreg_volume_file)\n            # read file as memory-mapped object, cf. https://numpy.org/doc/stable/reference/generated/numpy.memmap.html#numpy.memmap\n            self.segreg_volume = np.load(self.segreg_volume_file)#, mmap_mode=\"r\")\n            self.segreg_volume_times = np.arange(self.segreg_volume.shape[0]) #/self.fps\n            #print(\"self.segreg_volume.shape =\",self.segreg_volume.shape)\n\n        self.tl_ring_count_file, self.tl_ring_count_dict = '', {}\n        self.tl_ring_count_dict = defaultdict(lambda: [], self.tl_ring_count_dict) # map filenames to lists of rings info; defaults to empty list\n        tl_rc_files = glob.glob(tldir+'/*ring*.csv')\n        if len(tl_rc_files) > 0:\n            self.tl_ring_count_file = tl_rc_files[0] # not sure I'll keep the same name. something ring-related\n            self.setup_tl_ring_count_dict()\n        self.showing_predrings, self.predringlist = True, [] # thing we actually use\n\n        self.color = \"green\"\n\n        # this data is used to keep track of an item being dragged\n        self._drag_data = {\"x\": 0, \"y\": 0, \"items\": None}\n\n        self._token_data = []\n        self._numtokens = 0\n        self.hr = 4 # handle radius\n\n        # Define global event bindings\n        self.canvas.bind(\"<ButtonRelease-1>\", self.update_readout)\n        self.canvas.bind(\"<Double-Button-1>\", self.on_doubleclick)\n        self.canvas.bind(\"<ButtonPress-2>\", self.on_rightpress) # on mac, button 2 is right mouse\n        self.canvas.bind(\"<ButtonPress-3>\", self.on_rightpress) # on linux, button 3 is right mouse\n\n        self.canvas.focus_set()\n        self.canvas.bind(\"<b>\", self.on_bkey)\n        self.canvas.bind(\"<B>\", self.on_bkey)\n        self.canvas.bind(\"<g>\", self.on_gkey)\n        self.canvas.bind(\"<G>\", self.on_gkey)\n        self.canvas.bind(\"<m>\", self.on_mkey)\n        self.canvas.bind(\"<M>\", self.on_mkey)\n        self.canvas.bind(\"<r>\", self.on_rkey)\n        self.canvas.bind(\"<R>\", self.on_rkey)\n        self.canvas.bind(\"<q>\", self.on_qkey)\n        self.canvas.bind(\"<Q>\", self.on_qkey)\n        self.canvas.bind(\"<s>\", self.on_skey)\n        self.canvas.bind(\"<S>\", self.on_skey)\n        self.canvas.bind(\"<Left>\", self.on_leftarrow)\n        self.canvas.bind(\"<Right>\", self.on_rightarrow)\n        self.canvas.bind('<Motion>', self.mouse_move)\n\n        self.infostr = \"\"\n        self.text = self.canvas.create_text(self.width+10, 10+self.height, text=self.infostr,\n            anchor=tk.NW, font=tk.font.Font(size=15,family='Consolas'))\n        self.df = ''\n\n        self.load_new_files()\n\n\n    def setup_tl_ring_count_dict(self):\n        \"this will store info about all the antinode ring counts for each file, as a list of lists for each file\"\n        df = pd.read_csv(self.tl_ring_count_file)\n        #print(df.head())\n        for i, row in df.iterrows(): # stop telling me to stop using iterrows, it's clear coding ;-)\n            meta, parts = meta_from_str(row['filename']), row['filename'].split('_')[-5:]\n            bbox, rings = [int(x) for x in parts[0:4]], float(row['prediction'])\n            bb_rings = bbox + [rings]\n            if len(self.tl_ring_count_dict[meta]) == 0: self.tl_ring_count_dict[meta] = [bb_rings]\n            else: self.tl_ring_count_dict[meta].append(bb_rings)\n\n    def setup_pred_mask(self):\n        self.mask_img = None\n        self.mask_pred_file = 'top_losses/seg_images/'+str(Path(self.meta_file).stem)+'_pred.png'\n        if os.path.exists(self.mask_pred_file):\n            self.mask_img = Image.open(self.mask_pred_file)\n            if self.mask_img.size != (self.width, self.height): # to allow for half-size masks\n                self.mask_img = self.mask_img.resize((self.width, self.height)) # Image.size is read-only; resize() returns a new image\n            self.mask_img = ImageOps.colorize(self.mask_img, black =\"black\", white =(150,0,150))\n        return\n\n    
def merge_mask_image(self):\n if not self.mask_img: return\n if self.showing_mask:\n self.image = Image.blend(self.image, self.mask_img, 0.5)\n self.assign_image()\n\n def draw_pred_rings(self):\n if (not self.showing_predrings) or (len(self.predringlist) == 0): return\n for bbr in self.predringlist:\n cx, cy, ringstr = int((bbr[0]+bbr[2])/2), int((bbr[1]+bbr[3])/2), '{0:.1f}'.format(bbr[-1])\n #print(\"predicted ring counts: \",cx, cy, ringstr)\n ringtext = self.canvas.create_text(cx, self.y0+cy-15, text=ringstr, anchor=tk.CENTER, font=tk.font.Font(size=15), fill=\"yellow\")\n\n def draw_pred_bboxes(self, please_fix=True):\n if (not self.showing_bboxes) or len(self.bbox_list)==0: return\n for bb in self.bbox_list[0]:\n if please_fix:\n # icevision shrank our images and then ebedded them in 384,384, we need to undo that?\n bb = [int(x*512/384) for x in bb] # unshrink everything\n bb[1], bb[3] = bb[1]-(512-384)//2, bb[3]-(512-384)//2\n box = self.canvas.create_rectangle(bb[0],bb[1],bb[2],bb[3], outline=\"cyan\", width=2)\n\n def load_new_files(self):\n self.canvas.delete(\"all\") #destroy old tokens\n self.text = self.canvas.create_text(self.width+10, 10+self.y0, text=self.infostr,\n anchor=tk.NW, font=tk.font.Font(size=15,family='Consolas'))\n self.meta_file = self.meta_file_list[self.file_index]\n self.img_file = meta_to_img_path(self.meta_file, img_bank=self.img_bank)\n self.setup_pred_mask()\n self.read_assign_image()\n self.merge_mask_image()\n\n self.read_prev_next_imgs()\n self.bbox_list = self.bbox_df[self.bbox_df['filename'] == os.path.basename(self.meta_file)]['bblist'].tolist() # nice, huh? ;-)\n self.draw_pred_bboxes()\n self.predringlist = self.tl_ring_count_dict[os.path.basename(self.meta_file)]\n self.draw_pred_rings()\n self.read_assign_csv()\n\n\n def assign_image(self):\n self.tkimage = ImageTk.PhotoImage(image=self.image)\n self.label = tk.Label(image=self.tkimage)\n self.label.image = self.tkimage # keep a reference!\n self.canvas.create_image(self.width/2, self.y0 + self.height/2, image=self.tkimage)\n\n def read_assign_image(self):\n self.image = Image.open(self.img_file)\n self.image = ImageOps.colorize(self.image, black =\"black\", white =\"white\")\n self.backup = self.image\n self.assign_image()\n\n def read_assign_csv(self):\n col_names = ['cx', 'cy', 'a', 'b', 'angle', 'rings']\n self.df = pd.read_csv(self.meta_file,header=None,names=col_names) # read metadata file\n self.df.drop_duplicates(inplace=True) # sometimes the data from Zooniverse has duplicate rows\n # assign ellipse tokens (and their handles)\n for index, row in self.df.iterrows() :\n cx, cy, a, b, angle, rings = row['cx'], row['cy'], row['a'], row['b'], float(row['angle']), row['rings']\n a, b, angle = fix_abangle(a,b,angle)\n if (0!=rings): # 0 rings means no antinode, i.e. 
nothing there\n                self._create_token((cx, cy), (a, b), angle, rings, self.color)\n        self.update_readout(None)\n\n    def read_prev_next_imgs(self):\n        # prev image\n        self.prev_img, name = get_next_img(self.meta_file, -1, img_bank=self.img_bank)\n        if self.prev_img: self.canvas.create_image(self.width/2, self.y0 + self.height+20 + self.height/2, image=self.prev_img)\n        imlabel = f\"Previous Image: {name.split('/')[-1]}\"\n        self.canvas.create_text(10, self.y0+self.height, text=imlabel, anchor=tk.NW, font=tk.font.Font(size=12), fill='black')\n\n        # next image\n        self.next_img, name = get_next_img(self.meta_file, 1, img_bank=self.img_bank)\n        if self.next_img: self.canvas.create_image(self.width+20+self.width/2, self.y0 + self.height+20 + self.height/2, image=self.next_img)\n        imlabel = f\"Next Image: {name.split('/')[-1]}\"\n        self.canvas.create_text(self.width+30, self.y0+self.height, text=imlabel, anchor=tk.NW, font=tk.font.Font(size=12), fill='black')\n        return\n\n    def _create_token(self, coord, axes, angle, rings, color):\n        '''Create a tk token at the given coordinate in the given color'''\n        self._numtokens += 1\n        (x,y) = coord\n        (a,b) = axes\n        thistag = \"token\"+str(self._numtokens) # each token gets its own unique id, plus the whole ellipse gets a 'main' tag\n        oval = self.canvas.create_polygon(*tuple(poly_oval(x, self.y0+y, a, b, angle=angle)),outline=color, fill='', width=3, tags=(thistag,\"main\"))\n\n        # handles for resize / rotation\n        h_a_x, h_a_y = x + a*np.cos(np.deg2rad(angle)), y - a*np.sin(np.deg2rad(angle))\n        h_b_x, h_b_y = x + b*np.sin(np.deg2rad(angle)), y + b*np.cos(np.deg2rad(angle))\n        h_a = self.canvas.create_oval(h_a_x-self.hr, self.y0+h_a_y-self.hr, h_a_x+self.hr, self.y0+h_a_y+self.hr, outline=color, fill=color, width=3, tags=(thistag,\"handle\",\"axis_a\"))\n        h_b = self.canvas.create_oval(h_b_x-self.hr, self.y0+h_b_y-self.hr, h_b_x+self.hr, self.y0+h_b_y+self.hr, outline=color, fill=\"blue\", width=3, tags=(thistag,\"handle\",\"axis_b\"))\n\n        ringstr = '{0:.1f}'.format(rings)\n        ringtext = self.canvas.create_text(x-5, self.y0+y-10, text=ringstr, anchor=tk.NW, font=tk.font.Font(size=16), fill=color, tags=(thistag,\"ringtext\"))\n\n        self._token_data.append([oval,h_a,h_b,ringtext])\n\n        # Define Event Bindings for moving objects around\n        self.canvas.tag_bind(\"main\", \"<ButtonPress-1>\", self.on_main_press)\n        self.canvas.tag_bind(\"main\", \"<ButtonRelease-1>\", self.on_main_release)\n        self.canvas.tag_bind(\"main\", \"<B1-Motion>\", self.on_main_motion)\n\n        self.canvas.tag_bind(\"handle\", \"<ButtonPress-1>\", self.on_handle_press)\n        self.canvas.tag_bind(\"handle\", \"<ButtonRelease-1>\", self.on_handle_release)\n        self.canvas.tag_bind(\"handle\", \"<B1-Motion>\", self.on_handle_motion)\n\n    def graph_segreg_ts(self,event):\n        if 0 == len(self.segreg_volume): return\n        x, y = event.x, event.y\n        if (x>self.width) or (y>self.height): return\n        print(\"graph_segreg_ts: x, y =\",x,y)\n        slice = self.segreg_volume[:,y,x]\n        plt.plot(self.segreg_volume_times, slice, 'o-')\n        plt.show()\n\n    def on_bkey(self,event):\n        self.showing_bboxes = not self.showing_bboxes\n        self.load_new_files()\n    def on_gkey(self,event):\n        self.showing_bboxes = not self.showing_bboxes\n        self.graph_segreg_ts(event)\n    def on_mkey(self,event):\n        self.showing_mask = not self.showing_mask\n        if self.showing_mask and self.mask_img is not None:\n            self.merge_mask_image()\n            self.load_new_files()\n        if not self.showing_mask:\n            self.image = self.backup\n            self.load_new_files()\n    def on_rkey(self,event):\n        self.showing_predrings = not self.showing_predrings\n        self.load_new_files()\n    def on_qkey(self, event):\n        
print(\"Quitting\")\n sys.exit()\n def on_skey(self,event):\n print(\"Saving file \",self.meta_file)\n # TODO: add code to enforce a > b (and fix angle)\n self.df.to_csv(self.meta_file,index=False,header=None)\n def on_rightarrow(self,event): # right arrow on keyboard\n self.file_index += 1 # TODO: grab from top_losses\n if (self.file_index >= len(self.meta_file_list)):\n self.file_index = 0\n self.load_new_files()\n def on_leftarrow(self,event):\n self.file_index -= 1\n if (self.file_index < 0):\n self.file_index = len(self.meta_file_list)-1\n self.load_new_files()\n def mouse_move(self,event):\n x, y = event.x, event.y\n tx, ty = 2*self.width+40, 2*self.height-40\n box = self.canvas.create_rectangle(tx-5,ty, tx+110,ty+25, fill=\"white\", outline=\"white\")\n self.canvas.create_text(tx, ty, text=f'({x},{y})', anchor=tk.NW, font=tk.font.Font(size=12), fill='black')\n\n def on_main_press(self, event):\n '''Begining drag of an object'''\n obj_id = self.canvas.find_closest(event.x, event.y)[0] # record the item and its location\n tags = self.canvas.gettags( obj_id )\n self._drag_data[\"items\"] = tags[0]\n self._drag_data[\"x\"] = event.x\n self._drag_data[\"y\"] = event.y\n\n def on_main_release(self, event):\n '''End drag of an object'''\n # if object is off the screen, delete it\n if ((event.x < 0) or (event.y < 0 ) or (event.x > self.width) or (event.y > self.height)):\n self.canvas.delete(self._drag_data[\"items\"])\n self.update_readout(None)\n # reset the drag information\n self._drag_data[\"items\"] = None\n self._drag_data[\"x\"] = 0\n self._drag_data[\"y\"] = 0\n\n def on_main_motion(self, event):\n '''Handle dragging of an object'''\n # compute how much the mouse has moved\n delta_x = event.x - self._drag_data[\"x\"]\n delta_y = event.y - self._drag_data[\"y\"]\n # move the object the appropriate amount\n self.canvas.move(self._drag_data[\"items\"], delta_x, delta_y)\n # record the new position\n self._drag_data[\"x\"] = event.x\n self._drag_data[\"y\"] = event.y\n\n\n\n def retrieve_ellipse_info(self, tokentag):\n # retrieves info for whichever single ellipse is currently being manipulated\n tokenitems = self.canvas.find_withtag( tokentag )\n [main_id, axis_a_id, axis_b_id, ringtext_id ]= tokenitems\n\n ell_coords = self.canvas.coords(main_id) # coordinates of all points in ellipse\n cxoords, cyoords = ell_coords[0::2], ell_coords[1::2]\n cx, cy = np.mean( cxoords ), np.mean( cyoords ) # coordinates of center of ellipse\n\n h_a_coords = self.canvas.coords(axis_a_id)\n h_a_x, h_a_y = np.mean( h_a_coords[0::2] ), np.mean( h_a_coords[1::2] )\n a = np.sqrt( (h_a_x - cx)**2 + (h_a_y - cy)**2 )\n\n h_b_coords = self.canvas.coords(axis_b_id)\n h_b_x, h_b_y = np.mean( h_b_coords[0::2] ), np.mean( h_b_coords[1::2] )\n b = np.sqrt( (h_b_x - cx)**2 + (h_b_y - cy)**2 )\n\n angle = np.rad2deg( np.arctan2( cy - h_a_y, h_a_x - cx) )\n\n rings = self.canvas.itemcget(ringtext_id, 'text')\n\n return cx, cy, a, b, angle, rings, ell_coords\n\n\n def update_df(self):\n # iterate through all ellipses -- i.e. 
tokens with the 'main' tag\n        tokenitems = self.canvas.find_withtag( 'main' )\n\n\n    def on_handle_press(self, event):\n        '''Beginning drag of a handle'''\n        # record the item and its location\n        ids = self.canvas.find_closest(event.x, event.y)[0]\n        tags = self.canvas.gettags( ids )\n        self._drag_data[\"items\"] = ids\n        self._drag_data[\"x\"] = event.x\n        self._drag_data[\"y\"] = event.y\n\n    def on_handle_release(self, event):\n        '''End drag of a handle'''\n        # reset the drag information\n        self._drag_data[\"items\"] = None\n        self._drag_data[\"x\"] = 0\n        self._drag_data[\"y\"] = 0\n\n    def on_handle_motion(self, event):\n        '''Handle dragging of a handle'''\n        # compute how much the mouse has moved\n        oldx, oldy = self._drag_data[\"x\"], self._drag_data[\"y\"]\n        delta_x = event.x - oldx\n        delta_y = event.y - oldy\n        # move the handle the appropriate amount\n        self.canvas.move(self._drag_data[\"items\"], delta_x, delta_y)\n\n        # what are the tags for this particular handle\n        tags = self.canvas.gettags( self._drag_data[\"items\"] )\n        tokentag = tags[0]\n        cx, cy, a, b, angle, rings, coords = self.retrieve_ellipse_info( tokentag )\n\n        tokenitems = self.canvas.find_withtag( tokentag )\n        [main_id, axis_a_id, axis_b_id, ringtext_id ]= tokenitems\n\n        new_r = np.sqrt( (event.x -cx)**2 + (event.y - cy)**2 )\n        new_angle = np.rad2deg( np.arctan2( cy-oldy, oldx-cx) )\n\n        # which handle is currently being manipulated?\n        if (\"axis_a\" in tags):\n            b_coords = self.canvas.coords(axis_b_id)\n            h_b_x, h_b_y = np.mean( b_coords[0::2] ), np.mean( b_coords[1::2] )\n            new_coords = poly_oval( cx, cy, new_r, b, angle=new_angle)\n            h_b_x, h_b_y = cx + b*np.sin(np.deg2rad(new_angle)), cy + b*np.cos(np.deg2rad(new_angle))\n            self.canvas.coords(axis_b_id, [ h_b_x-self.hr, h_b_y-self.hr, h_b_x+self.hr, h_b_y+self.hr] )\n        elif (\"axis_b\" in tags):\n            a_coords = self.canvas.coords(axis_a_id)\n            h_a_x, h_a_y = np.mean( a_coords[0::2] ), np.mean( a_coords[1::2] )\n            new_angle = new_angle + 90 # a and b axes are offset by 90 degrees; angle is defined relative to a axis\n            new_coords = poly_oval( cx, cy, a, new_r, angle=new_angle)\n            h_a_x, h_a_y = cx + a*np.cos(np.deg2rad(new_angle)), cy - a*np.sin(np.deg2rad(new_angle))\n            self.canvas.coords(axis_a_id, [ h_a_x-self.hr, h_a_y-self.hr, h_a_x+self.hr, h_a_y+self.hr] )\n        else:\n            print(\"Error: bad tags\")\n            assert(0==1)\n\n        self.canvas.coords(main_id, new_coords) # reassign coords for the entire ellipse (i.e. 'redraw')\n        # record the new position\n        self._drag_data[\"x\"] = event.x\n        self._drag_data[\"y\"] = event.y\n\n\n    def on_doubleclick(self, event): # create a new ellipse\n        cx, cy, a, b, angle, rings = event.x, event.y, 50, 50, 0, 1 # give it some default data\n        self._create_token((cx, cy), (a, b), angle, rings, self.color)\n        self.update_readout(None)\n\n    def on_rightpress(self,event):\n        obj_id = self.canvas.find_closest(event.x, event.y)[0]\n        tags = self.canvas.gettags( obj_id )\n        ringtext = self.canvas.itemcget(obj_id, 'text')\n        result = askfloat(\"How many rings\", \"How many rings?\", initialvalue=float(ringtext))\n\n        self.canvas.itemconfigure(obj_id,text=str(result))\n        self.canvas.focus_set() # that dialog box stole the focus. 
get it back\n self.update_readout(None)\n\n def update_readout(self, event):\n mains = self.canvas.find_withtag( \"main\" )\n self.infostr = self.meta_file+'\\n'+str(self.img_file)+'\\n\\n'\n\n new_df = pd.DataFrame(columns=self.df.columns)\n # first we update the dataframe info\n for main_id in mains:\n tokentag = self.canvas.gettags( main_id )[0]\n cx, cy, a, b, angle, rings, coords = self.retrieve_ellipse_info( tokentag )\n a,b,angle = fix_abangle(a,b,angle)\n [cx, cy, a, b, angle] = [int(round(x)) for x in [cx, cy, a, b, angle]]\n #x, cy, a, b, angle = int(round(cx,0)), int(round(cy,0)), int(round(a,0), round(b,0), round(angle,0)\n new_df = new_df.append({'cx':cx, 'cy':cy, 'a':a, 'b':b, 'angle':angle, 'rings':rings},ignore_index=True)\n\n self.df = new_df\n self.infostr += self.df.to_string(index=False,justify='left') # then we output the dataframe info to a string\n self.canvas.itemconfigure(self.text, text=self.infostr) # then we re-assign the text widget with the new string\n\n\n\n@call_parse\ndef ellipse_editor(\n seq:Param(\"Ignore top-loss ordering and do sequential ordering\", store_true),\n files:Param(\"Wildcard name for all CSV files to edit\", str)='annotations/*.csv',\n imgbank:Param(\"Directory where all the (unlabeled) images are\",str)='images/',\n tldir:Param(\"Directory where 'top losses' info is stored'\",str)='top_losses/',\n ):\n global img_bank\n # typical command-line calling sequence:\n # $ ./ellipse_editor.py *.csv\n files = ''.join(files)\n meta_file_list = sorted(glob.glob(files))\n img_bank = imgbank\n\n print(\"Instructions:\")\n print(\" Mouse bindings:\")\n print(\" - Double-click to create ellipse\")\n print(\" - Left-click and drag inside ellipse (but not on a number) to move ellipse\")\n print(\" - Left-click and drag 'handles' to resize/rotate ellipse (solid = 'a', hollow = 'b')\")\n print(\" - Right-click (or middle click) inside number to change ring count\")\n print(\" - Drag off-screen to destroy/delete ellipse\")\n print(\" Key bindings:\")\n print(\" - Right Arrow : Next file\")\n print(\" - Left Arrow : Previous file\")\n print(\" - M : Toggle display of predicted segmentation mask (if available)\")\n print(\" - R : Toggle display of predicted ring counts (if available)\")\n print(\" - B : Toggle display of predicted bounding boxes (if available)\")\n print(\" - S : Save metadata\")\n print(\" - Q : Quit\")\n\n\n root = tk.Tk()\n root.title('espiownage: ellipse_editor')\n EllipseEditor(root, meta_file_list, img_bank=img_bank, tldir=tldir, seq=seq).pack(fill=\"both\", expand=True)\n root.mainloop()\n","repo_name":"drscotthawley/espiownage","sub_path":"espiownage/ellipse_editor.py","file_name":"ellipse_editor.py","file_ext":"py","file_size_in_byte":26417,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"50"} +{"seq_id":"39709288992","text":"\"\"\"End to end testing on siamese nets\n\"\"\"\n\n# pylint: disable=C0103\n# pylint: disable=C0325\n# pylint: disable=E1101\n\n\nimport numpy as np\nfrom scipy.stats import zscore\n\nfrom modelwrangler.corral.convolutional_siamese import ConvolutionalSiamese\nfrom modelwrangler.tester import ModelTester\n\n\ndef make_timeseries_testdata(in_dim=100, n_samp=1000):\n \"\"\"Make sample data for linear regression\n \"\"\"\n\n signal = zscore(np.random.randn(n_samp, 3), axis=0)\n\n X = zscore(np.random.randn(n_samp, in_dim), axis=0)\n for i in range(X.shape[1]):\n X[:, i] += 0.1 * signal[:, (i % signal.shape[1])]\n return X\n\ndef test_conv_siamese(dim=48):\n \"\"\"Test 
convolutional siamese nets\n    \"\"\"\n\n    X0 = make_timeseries_testdata(in_dim=dim)\n    X0 = X0[:, :, np.newaxis]\n\n    X1 = make_timeseries_testdata(in_dim=dim)\n    X1 = X1[:, :, np.newaxis]\n\n    Y = np.array([i % 2 for i in range(X0.shape[0])]).reshape(-1, 1)\n\n    convsiam_network = ConvolutionalSiamese(\n        in_size=dim,\n        out_size=3,\n        conv_nodes=[3],\n        conv_params={\n            'dropout_rate': 0.1,\n            'kernel': 3,\n            'strides': 2,\n            'pool_size': 2,\n        },\n        dense_nodes=[2],\n        dense_params={\n            'dropout_rate': 0.1,\n            'activation': 'relu',\n        },\n        output_params={\n            \"dropout_rate\": None,\n            \"activation\": 'linear',\n        },\n    )\n\n\n    print(convsiam_network.score([X0, X1], Y))\n    for _ in range(5):\n        convsiam_network.train([X0, X1], Y)\n        print(convsiam_network.score([X0, X1], Y))\n\nif __name__ == \"__main__\":\n\n    print('\\n\\nunit testing siamese net')\n    ModelTester(ConvolutionalSiamese)\n\n    print(\"\\n\\ne2e testing ConvolutionalSiamese\")\n    test_conv_siamese()\n","repo_name":"brenton-enigma/modelwrangler","sub_path":"tests/test_siamese.py","file_name":"test_siamese.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"27967379728","text":"import enum\nimport uuid\n\nfrom sqlalchemy import (\n    CHAR,\n    JSON,\n    Boolean,\n    Column,\n    DateTime,\n    Enum,\n    ForeignKey,\n    Integer,\n    String,\n    UniqueConstraint,\n)\nfrom sqlalchemy.dialects.postgresql import UUID\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.sql import func\nfrom sqlalchemy.types import CHAR, TypeDecorator\n\n\nclass GUID(TypeDecorator):\n    \"\"\"Platform-independent GUID type.\n\n    Uses PostgreSQL's UUID type, otherwise uses\n    CHAR(32), storing as stringified hex values.\n\n    \"\"\"\n\n    impl = CHAR\n\n    def load_dialect_impl(self, dialect):\n        if dialect.name == \"postgresql\":\n            return dialect.type_descriptor(UUID())\n        else:\n            return dialect.type_descriptor(CHAR(32))\n\n    def process_bind_param(self, value, dialect):\n        if value is None:\n            return value\n        elif dialect.name == \"postgresql\":\n            return str(value)\n        else:\n            if not isinstance(value, uuid.UUID):\n                return \"%.32x\" % uuid.UUID(value).int\n            else:\n                # hexstring\n                return \"%.32x\" % value.int\n\n    def process_result_value(self, value, dialect):\n        if value is None:\n            return value\n        else:\n            if not isinstance(value, uuid.UUID):\n                value = uuid.UUID(value)\n            return value\n\n\nBase = declarative_base()\n\n\nclass Team(Base):\n    __tablename__ = \"teams\"\n    id = Column(Integer, primary_key=True)\n    name = Column(String, nullable=False)\n    team_code = Column(String, nullable=False)\n    game_id = Column(Integer, ForeignKey(\"games.id\"), nullable=False)\n    players = relationship(\"PlayerInGame\", back_populates=\"team\")\n\n    quizadmin = Column(Boolean, default=False, nullable=False)\n\n    __table_args__ = (\n        UniqueConstraint(\"game_id\", \"team_code\", name=\"team_code_unique_in_game\"),\n    )\n\n\nclass Player(Base):\n    __tablename__ = \"players\"\n    id = Column(Integer, primary_key=True)\n    uuid = Column(GUID, default=uuid.uuid4, unique=True)\n    name = Column(String, nullable=False)\n    color = Column(String, nullable=False)\n\n    sub_players = relationship(\"PlayerInGame\", back_populates=\"player\")\n\n\nclass PlayerInGame(Base):\n    __tablename__ = \"player_game\"\n    id = Column(Integer, primary_key=True)\n    player_id = Column(Integer, ForeignKey(\"players.id\"), nullable=False)\n    game_id = Column(Integer, ForeignKey(\"games.id\"), nullable=False)\n    team_id = Column(Integer, 
ForeignKey(\"teams.id\"), nullable=True)\n\n team = relationship(\"Team\", back_populates=\"players\")\n player = relationship(\"Player\", back_populates=\"sub_players\")\n game = relationship(\"Game\")\n answers = relationship(\"GivenAnswer\", back_populates=\"player\")\n voted_for = relationship(\"Vote\")\n\n __table_args__ = (\n UniqueConstraint(\"player_id\", \"game_id\", name=\"subplayer_unique_in_game\"),\n )\n\n def __str__(self):\n vars = \"id player_id game_id team_id\".split()\n return f\"{self.__class__.__name__}: {' '.join([f'{v}={getattr(self, v)}' for v in vars])}\"\n\n\nclass Selected(enum.Enum):\n true = True\n\n\nclass GivenAnswer(Base):\n __tablename__ = \"given_answers\"\n id = Column(Integer, primary_key=True)\n uuid = Column(GUID, default=uuid.uuid4, unique=True)\n answer = Column(String, nullable=False)\n question_uuid = Column(GUID, ForeignKey(\"questions.uuid\"))\n player_id = Column(Integer, ForeignKey(\"player_game.id\"))\n player = relationship(\"PlayerInGame\", back_populates=\"answers\")\n question = relationship(\"Question\")\n\n time_created = Column(DateTime(timezone=True), server_default=func.now())\n time_updated = Column(DateTime(timezone=True), onupdate=func.now())\n\n votes = relationship(\"Vote\")\n is_selected = Column(Enum(Selected))\n __table_args__ = (UniqueConstraint(\"question_uuid\", \"player_id\", \"is_selected\"),)\n\n\nclass Question(Base):\n __tablename__ = \"questions\"\n id = Column(Integer, primary_key=True)\n uuid = Column(GUID, default=uuid.uuid4, unique=True)\n question = Column(String, nullable=False)\n game_id = Column(Integer, ForeignKey(\"games.id\"))\n game = relationship(\"Game\", back_populates=\"questions\")\n is_active = Column(Boolean, default=False)\n\n\nclass Vote(Base):\n __tablename__ = \"votes\"\n id = Column(Integer, primary_key=True)\n answer_id = Column(\"answer_id\", Integer, ForeignKey(\"given_answers.id\"), index=True)\n subplayer_id = Column(\"subplayer_id\", Integer, ForeignKey(\"player_game.id\"))\n\n __table_args__ = (\n UniqueConstraint(\n \"subplayer_id\", \"answer_id\", name=\"subplayer_unique_in_answer\"\n ),\n )\n\n\nclass Game(Base):\n __tablename__ = \"games\"\n id = Column(Integer, primary_key=True)\n uuid = Column(GUID, default=uuid.uuid4, unique=True)\n name = Column(String)\n questions_ordered = Column(JSON)\n questions = relationship(\"Question\", order_by=Question.id, back_populates=\"game\")\n num_questions = Column(Integer, default=20)\n\n teams = relationship(\"Team\", backref=\"game\")\n","repo_name":"Debilski/Social-Quiztancing","sub_path":"backend/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":5075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"16393224449","text":"def solution(board, moves):\n count = 0\n basket = [0]\n turn_board = [list(reversed([i for i in col if i])) for col in zip(*board)]\n for idx in moves:\n if not turn_board[idx-1]:\n continue\n item = turn_board[idx-1].pop()\n if item == basket[-1]:\n basket.pop()\n count += 2\n else:\n basket.append(item)\n return count\n\n\n# 테스트 코드\nprint(solution([[0, 0, 0, 0, 0], [0, 0, 1, 0, 3], [0, 2, 5, 0, 1], [4, 2, 4, 4, 2], [3, 5, 1, 3, 1]],\n [1, 5, 3, 5, 1, 2, 1, 4]))\n","repo_name":"Kimdonghyeon7645/python_school-study","sub_path":"정보보안/11월17일-스텍-크레인 인형뽑기 게임.py","file_name":"11월17일-스텍-크레인 인형뽑기 게임.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} 
+{"seq_id":"6369073833","text":"import demistomock as demisto\nfrom CommonServerPython import * # noqa: E402 lgtm [py/polluting-import]\n\n\ndef _get_incident():\n return demisto.incidents()[0]\n\n\ndef closeCase():\n incident = _get_incident()\n close_reason = demisto.args().get('closeReason')\n close_notes = demisto.args().get('closeNotes', '')\n action = 'closeCase'\n subOption = 'True Incident'\n\n if close_reason is not None and close_reason == \"False Positive\":\n action = \"modelReviewCase\"\n subOption = \"Tuning Required\"\n elif close_reason is not None and close_reason == \"Other\":\n action = \"modelReviewCase\"\n subOption = \"Others\"\n\n _caseId = \"\"\n for label in incident['labels']:\n if label['type'] == 'caseId':\n _caseId = label['value']\n break\n\n if _caseId == \"\":\n raise Exception('caseId was not found in the incident labels')\n\n demisto.executeCommand('gra-case-action', {\n 'action': action,\n 'subOption': subOption,\n 'caseId': _caseId,\n 'caseComment': close_notes\n })\n\n\ndef main():\n try:\n closeCase()\n except Exception as ex:\n return_error(f'Failed to execute gra-case-close-post-processing. Error: {str(ex)}')\n\n\nif __name__ in ('__main__', '__builtin__', 'builtins'):\n main()\n","repo_name":"demisto/content","sub_path":"Packs/Gurucul/Scripts/graupdatecasestatus/graupdatecasestatus.py","file_name":"graupdatecasestatus.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":1023,"dataset":"github-code","pt":"50"} +{"seq_id":"15550665304","text":"\"\"\"\nМногопоточное чтение кода состояния\n\"\"\"\nimport threading\nimport time\n\nimport requests\n\nTHREADS_QTY = 10\n\n\ndef read_example() -> None:\n response = requests.get(\"https://hazadus.ru\")\n print(f\"{threading.current_thread().name}, status code: {response.status_code}\")\n\n\nthread_pool = []\n\n# Create all threads\nfor i in range(0, THREADS_QTY):\n thread = threading.Thread(target=read_example)\n thread_pool.append(thread)\n\nstart = time.time()\n\n# Start each thread\nfor thread in thread_pool:\n thread.start()\n print(f\"Thread {thread} started.\")\n\nprint(\"All threads started!\")\n\n# Join each thread\nfor thread in thread_pool:\n thread.join()\n\nend = time.time()\n\nprint(f\"Execution time: {end - start:.4f} sec.\")\n","repo_name":"hazadus/asyncio-learn","sub_path":"ch1/listing_1_7a.py","file_name":"listing_1_7a.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"34655755464","text":"_author_ = 'jake'\n_project_ = 'leetcode'\n\n# https://leetcode.com/problems/course-schedule-ii/\n# There are a total of n courses you have to take, labeled from 0 to n - 1.\n# Some courses may have prerequisites, for example to take course 0 you have to first take course 1,\n# which is expressed as a pair: [0,1]. Given the total number of courses and a list of prerequisite pairs,\n# return the ordering of courses you should take to finish all courses. There may be multiple correct orders,\n# you just need to return one of them. 
If it is impossible to finish all courses, return an empty array.\n\n# As per problem 207, find courses with no prerequisites and remove dependencies on such courses.\n# Time - O(m + n)\n# Space - O(m + n)\n\nfrom collections import defaultdict\n\nclass Solution(object):\n def findOrder(self, numCourses, prerequisites):\n \"\"\"\n :type numCourses: int\n :type prerequisites: List[List[int]]\n :rtype: List[int]\n \"\"\"\n order = []\n nb_prerequisites = defaultdict(int) # key is course, value is number of prerequisite courses\n prereq_list = defaultdict(list) # key is a course, value is list of courses that depend on course\n\n for after, before in prerequisites:\n nb_prerequisites[after] += 1\n prereq_list[before].append(after)\n\n can_take = set(i for i in range(numCourses)) - set(nb_prerequisites.keys())\n\n while can_take:\n\n course = can_take.pop() # take any course with no prerequisites\n order.append(course)\n for dependent in prereq_list[course]:\n nb_prerequisites[dependent] -= 1 # decrement count of dependencies\n if nb_prerequisites[dependent] == 0: # no more prerequisites\n can_take.add(dependent)\n\n return order if len(order) == numCourses else []","repo_name":"jakehoare/leetcode","sub_path":"python_1_to_1000/210_Course_Schedule_II.py","file_name":"210_Course_Schedule_II.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"50"} +{"seq_id":"11706216831","text":"import rbcde.matrix\nimport pandas as pd\ntry:\n\tfrom scanpy import logging as logg\nexcept ImportError:\n\tpass\n\ndef RBC(adata, clus_key='leiden', layer=None, use_raw=False):\n\t'''\n\tCompute the rank-biserial correlation coefficient for each gene in each cluster. The \n\tresults can be subsequently turned into a marker list via the helper function \n\t``rbcde.filter_markers()``. The primary output is stored as part of either `.var` or \n\t`.raw.var`, depending on whether `.raw` data is used.\n\t\n\tThe rank-biserial correlation coefficient \n\t`(Cureton, 1956) `_ \n\tcan be used as an effect size equivalent of the Wilcoxon test \n\t`(Kerby, 2014) `_, which in \n\tturn was deemed to perform well on single cell data problems \n\t`(Soneson, 2018) `_. Using effect size \n\tanalyses is recommended for problems with large population sizes \n\t`(Sullivan, 2012) `_.\n\t\n\tInput\n\t-----\n\tadata : ``AnnData``\n\t\tNeeds per cell normalised data stored somewhere in the object (as either sparse or \n\t\tdense), and the desired clustering/grouping vector included in `.obs`.\n\tclus_key : ``str``, optional (default: \"leiden\")\n\t\tThe name of the `.obs` column containing the clustering/grouping.\n\tlayer : ``str`` or ``None``, optional (default: ``None``)\n\t\tIf specified, take the expression data from the matching ``.layers`` field. Overrides \n\t\t``use_raw`` if provided.\n\tuse_raw : ``bool``, optional (default: ``False``)\n\t\tIf no ``layer`` was specified and this is set to ``True``, take the data from the \n\t\t``.raw`` field of the object. 
Store results in ``.raw.var`` to match dimensionality.\n\t'''\n\t\n\tstart = logg.info('computing rank-biserial correlation')\n\t#extract appropriate data form, along with gene names\n\t#layer trumps use_raw\n\traw_prefix = ''\n\tif layer is not None:\n\t\tdata = adata.layers[layer]\n\t\tgenes = adata.var_names\n\telse:\n\t\t#try to get out .raw if possible (and specified)\n\t\tif adata.raw is not None and use_raw:\n\t\t\tdata = adata.raw.X.copy()\n\t\t\tgenes = adata.raw.var_names\n\t\t\traw_prefix = '.raw'\n\t\telse:\n\t\t\tdata = adata.X.copy()\n\t\t\tgenes = adata.var_names\n\t#extract cluster list\n\tclusters = adata.obs[clus_key].copy()\n\t#think that's everything we need. call matrix version of function\n\trbc_out = rbcde.matrix.RBC(data, clusters, genes)\n\t#append the fact this is RBC output to each column name and stash it in var\n\tfor col in rbc_out.columns:\n\t\tif use_raw:\n\t\t\tadata.raw.var['RBC_'+col] = rbc_out[col]\n\t\telse:\n\t\t\tadata.var['RBC_'+col] = rbc_out[col]\n\tlogg.info('\tfinished', time=start,\n\t\tdeep=(\n\t\t\t'added\\n'\n\t\t\t\"\t'RBC_' columns for each of the clusters to \"+raw_prefix+\".var\"\n\t\t),)\n\ndef filter_markers(adata, thresh=0.5, use_raw=False):\n\t'''\n\tFilter the rank-biserial correlation coefficients computed with ``rbcde.RBC()`` to a \n\tlist of markers for each cluster, provided as a data frame and a Scanpy plotting compatible \n\t``var_names`` cluster marker dictionary. Returns those two objects, in this order.\n\t\n\tInput\n\t-----\n\tadata : ``AnnData``\n\t\tNeeds to have been processed with ``rbcde.RBC()``.\n\tthresh : ``float``, optional (default: 0.5)\n\t\tThe threshold value used to call markers. Literature \n\t\tcritical values \n\t\tcan be used.\n\tuse_raw : ``bool``, optional (default: ``False``)\n\t\tSet this to ``True`` if the raw data was used for the computation so that the \n\t\tresults can be retrieved from the correct field of the object.\n\t'''\n\t\n\t\n\t#extract the RBC results embedded in .var and remove the prefix\n\tif use_raw:\n\t\tresults = adata.raw.var.loc[:,[i.startswith('RBC_') for i in adata.raw.var.columns]]\n\telse:\n\t\tresults = adata.var.loc[:,[i.startswith('RBC_') for i in adata.var.columns]]\n\tresults.columns = [i.replace('RBC_','',1) for i in results.columns]\n\t#call the matrix version to get a marker data frame\n\tdegs = rbcde.matrix.filter_markers(results, thresh)\n\t#parse up a plotting cluster marker dictionary\n\tplot_dict = {}\n\tfor clus in results.columns:\n\t\tplot_dict[clus] = degs.loc[degs['cluster']==clus, :].index\n\t\tlogg.hint(str(len(plot_dict[clus]))+' markers found for cluster '+clus)\n\t#return both the data frame and the plot-ready form\n\treturn degs, plot_dict","repo_name":"Teichlab/rbcde","sub_path":"rbcde/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4338,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"50"} +{"seq_id":"40819414327","text":"from django.conf.urls import url\n\nfrom . import views\n# from rest_framework.urlpatterns import format_suffix_patterns\nfrom . 
import api\n\nweb_urls = [\n    url(r'^admin/upload/(?P<pk>\\d+)/$', views.multi_upload, name='multi-upload'),\n    url(r'^admin/delete/(?P<pk>\\d+)/$', views.upload_delete, name='image-delete'),\n    url(r'^$', views.album_list, name='album-list'),\n    url(r'^(?P<slug>[\\w-]+)/', views.AlbumDetail.as_view(), name='album-images'),\n]\n\napi_urls = [\n    # url(r'^api/images/$', api.ImageListAPI.as_view()),\n    # url(r'^api/albums/$', api.AlbumListAPI.as_view()),\n    # url(r'^api/albums/(?P<pk>\\d+)/$', api.AlbumDetailAPI.as_view()),\n]\n\n# api_urls = format_suffix_patterns(api_urls)\n\nurlpatterns = web_urls + api_urls\n","repo_name":"vimm0/website-backend","sub_path":"apps/gallery/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"32099093302","text":"import csv\n\nwith open('data.csv', 'r') as f:\n    data = csv.reader(f)\n    dat = []\n    for i in data:\n        dat.append(i)\nwith open('metrics.csv', 'r') as f:\n    metrics = csv.reader(f)\n    met=[]\n    for i in metrics:\n        met.append(i)\nprint(len(met))\nprint(len(dat))\nid=[]\nk=[]\nnewdat=[]\n\nfor i in met:\n    id.append(i[0])\nfor i in range(1,len(dat)):\n    if dat[i][0] not in id:\n        k.append(i)\n# print(k)\n\nfor i in range(0,len(dat)):\n    if i not in k:\n        newdat.append(dat[i])\n\n\n# print(newdat[0],newdat[1])\n\nwith open('newd.csv', \"w\", newline='') as f:\n    # with open(birth_weight_file, \"w\") as f:\n    writer = csv.writer(f)\n    writer.writerows(newdat)\n# a=[[1,2],[2,3],[3,3]]\n# b=[2,3,4]\n# c=9\n# for i in range(0,len(a)):\n#     if a[i][0] not in b:\n#         c=i\n# del a[c]\n# print(a)","repo_name":"AnElegantHusky/BioInfo","sub_path":"代码/提交文件pt1/chuli.py","file_name":"chuli.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"} +{"seq_id":"40819414327","text":"# REVISIÓN PRELIMINAR DE NÓMINA\r\n\r\n#----Librerías----------------------------------------------------------------#\r\n\r\nimport PySimpleGUI as sg\r\nimport pandas as pd\r\nimport numpy as np\r\nimport openpyxl\r\nimport string\r\n\r\n\r\n#------Funciones--------------------------------------------------------------#\r\n\r\n# Encontrar la columna en excel ingresando un valor numerico. 
La 0 es la A.\r\ndef num_a_col_excel(num):\r\n col = \"\"\r\n while num > 0:\r\n num -= 1\r\n col = chr(num % 26 + 65) + col\r\n num = num // 26\r\n return col\r\n \r\n#FORMATEA ARCHIVO DE NOMINA HORIZONTAL\r\ndef format_nomina_horizontal(path_nom_hori):\r\n\r\n \"\"\"\r\n Esta función toma como argumento el archivo de nómina horizontal tal y como se genera del software NgSoft\r\n y lo formatea de modo que sea trabajable (quita columnas compartidas y conserva solo información necesaria)\r\n \r\n \"\"\"\r\n\r\n df=pd.read_excel(path_nom_hori,header=8)\r\n columns=df.columns.tolist()\r\n\r\n #Creamos Lista con nombres de columnas que no contienen \"Unnamed (estas surgen por dejar de compartir una celda)\"\r\n names=[]\r\n\r\n for column in columns:\r\n if \"Unnamed\" in column:\r\n pass\r\n else:\r\n temp=column\r\n names.append(temp)\r\n\r\n #Conservamos solo columnas que no contienen \"Unnamed\"\r\n df_nom=df[df.columns[df.columns.isin(names)]]#.dropna()\r\n\r\n #Cambiamos el nombre de algunas columnas por como aparece en la primera fila - omitimos cuando fila 1 = nan\r\n column_names=df_nom.columns.tolist()\r\n row_1=df_nom.iloc[0,:].tolist()\r\n\r\n test=pd.DataFrame(column_names,row_1).reset_index().rename(columns={0:\"name\",\"index\":\"row1\"}).dropna()\r\n\r\n #Creamos el diccionario para renombrar\r\n dict_names=dict(zip(test.name, test.row1))\r\n\r\n #Renombrando columnas y quitando total\r\n df_nom=df_nom.rename(columns=dict_names).iloc[1:,:-3].reset_index(drop=True).dropna()\r\n\r\n #Eliminamos dígito de verificación a la columna CC:\r\n CEDULA=[]\r\n for item in df_nom.CC:\r\n if isinstance(item,float):\r\n temp=int(item)\r\n CEDULA.append(temp)\r\n elif isinstance(item,int):\r\n temp=item\r\n CEDULA.append(temp)\r\n elif '-' in item:\r\n temp=item.split(\"-\",1)[0]\r\n CEDULA.append(temp)\r\n else:\r\n temp=item\r\n CEDULA.append(temp) \r\n\r\n df_nom.CC=CEDULA\r\n df_nom[\"CC\"]=df_nom[\"CC\"].astype(\"int64\")\r\n\r\n #Reemplazamos \",\" por nada para poder sumar valores en cada columna que aplique:\r\n for column in df_nom.columns.tolist(): \r\n if \"-\" in column:\r\n aux=[]\r\n for value in df_nom[column]:\r\n temp=value.replace(\",\",\"\")\r\n temp=float(temp)\r\n aux.append(temp)\r\n df_nom[column]=aux\r\n else:\r\n pass\r\n\r\n return df_nom\r\n\r\n#MAESTRO DE NÓMINA PARA FECHA DE INGRESO Y CODIGO\r\ndef maestro(path_maestro):\r\n\r\n \"\"\" \r\n Esta función lee el informe MAESTRO que se genera de Ngsoft y \r\n conserla las columnas que necesitamos: \"CODIGO\" y \"FECHA DE INGRESO\"\r\n\r\n \"\"\"\r\n\r\n df_2=pd.read_excel(path_maestro)#, engine='openpyxl')\r\n df_2.rename(columns={df_2.columns[3]:\"CC\"},inplace=True)\r\n df_2=df_2[[\"CC\",\"codigo_empleado\",\"fecha_ingreso_contrato\"]]\r\n \r\n #Borrando los duplicados y conservando la fecha más reciente: \r\n df_2=df_2.sort_values('fecha_ingreso_contrato').drop_duplicates('CC',keep='last')\r\n return df_2\r\n\r\n#DISTRIBUCIÓN DE SALARIOS DEL MES ANTERIOR\r\ndef rev_nom_anterior(path_revi_nomi):\r\n\r\n \"\"\"\"\r\n Esta función toma la revisión del mes anterior para traer las columnas\r\n \"TIPO DE SALARIO\", \"% AL 100\", \"% FLI SALARIO BASICO\", \"% FLI\", \"SALARIO TOTAL\"\r\n \"\"\"\r\n\r\n df_5=pd.read_excel(path_revi_nomi)\r\n df_5=df_5.iloc[:,[0,9,10,11,12,13]]\r\n return df_5\r\n\r\n#INCREMENTOS SALARIALES\r\ndef novedades_nomina(path_novedades,df_revision):\r\n\r\n \"\"\"\r\n Esta función abre el archivo de novedades de nómina para aplicar aumentos salariales\r\n \r\n \"\"\"\r\n xls = 
pd.ExcelFile(path_novedades)\r\n\r\n #Leemos la hoja donde están los aumentos salariales (16):\r\n df14_16=xls.parse(16,header=2).iloc[:,[0,6]]\r\n\r\n \r\n df14_16.rename(columns={\"CEDULA \":\"CC\",df14_16.columns[1]:\"TOTAL SALARIO\"},inplace=True)\r\n\r\n global CC_INCRE,df_replace\r\n #Creamos lista de de CEDULA y de TOTAL SALARIO:\r\n CC_INCRE=list(df14_16[\"CC\"])\r\n\r\n #FILTAMOS EL ARCHIVO DE REVISIÓN PARA LAS PERSONAS QUE TUVIERON INCREMENTO PARA REEMPLAZAR EL VALOR:\r\n\r\n df_replace=df_revision[df_revision[\"CC\"].isin(CC_INCRE)]\r\n df_no_replace=df_revision[df_revision[\"CC\"].isin(CC_INCRE)==False]\r\n\r\n df_replace[\"SALARIO TOTAL\"]=list(df_replace.merge(df14_16,on=\"CC\",how=\"left\")[\"TOTAL SALARIO\"])\r\n \r\n df_replace[\"AUMENTO DE SALARIO\"] = df_replace[\"CC\"].apply(lambda x: \"SI\" if x in CC_INCRE else \"NO\")\r\n\r\n\r\n return df_replace, df_no_replace\r\n\r\n#Leer info de personal que ingresa\r\ndef ingresos(path_novedades,df_revision):\r\n global fecha\r\n\r\n \"\"\"\r\n Esta función extrae la información del archivo de novedades de las personas \r\n que ingresaron en el mes de revisión a la compañía, las variables que extrae\r\n son: \"COMPENSACION\", \"TIPO DE SALARIO\", \"FECHA DE INGRESO\".\r\n\r\n \"\"\"\r\n\r\n xls = pd.ExcelFile(path_novedades) \r\n df14_6=xls.parse(5)\r\n\r\n \r\n df14_6=df14_6.iloc[:,[0,3,4,5]].dropna()\r\n df14_6[df14_6.columns[0]]=df14_6[df14_6.columns[0]].astype(\"int64\")\r\n df14_6=df14_6[df14_6[df14_6.columns[3]]<=fecha].reset_index(drop=True)\r\n \r\n \r\n\r\n #Listas:\r\n CC_INGRE=list(df14_6[df14_6.columns[0]])\r\n \r\n\r\n #Excluimos Documento de extranjeria dado que no coincide con el archivo 1\r\n CC_AUX=[]\r\n for value in CC_INGRE: \r\n if len(str(value))>=13:\r\n pass\r\n else:\r\n temp=value\r\n CC_AUX.append(temp)\r\n CC_INGRE=CC_AUX\r\n \r\n df14_6=df14_6[df14_6[df14_6.columns[0]].isin(CC_INGRE)].reset_index(drop=True)\r\n \r\n \r\n df14_6.rename(columns={df14_6.columns[0]:\"CC\"},inplace=True)\r\n df14_6.rename(columns={df14_6.columns[2]:\"TIPO SALARIO\"},inplace=True)\r\n\r\n #Personal a modificar:\r\n\r\n df_replace_2=df_revision.loc[df_revision[\"CC\"].isin(CC_INGRE)].reset_index(drop=True)\r\n df_no_replace_2=df_revision[df_revision[\"CC\"].isin(CC_INGRE)==False]\r\n\r\n\r\n df_replace_2[\"SALARIO TOTAL\"]=list(df_replace_2.merge(df14_6,on=\"CC\",how=\"left\")[\"COMPENSACION \"])\r\n df_replace_2[\"TIPO DE SALARIO \"]=list(df_replace_2.merge(df14_6,on=\"CC\",how=\"left\")[\"TIPO SALARIO\"])\r\n \r\n\r\n return df_replace_2,df_no_replace_2\r\n\r\n# CONFIGURA LOS PORCENTAJES DE PARTICIPIACIÓN DEL SALARIO\r\ndef distribucion_salario(df_revision):\r\n\r\n \"\"\"\r\n Esta función toma el archivo de revisión y calcula los porcentajes de salario básico, flexible \r\n dependiendo del tpo de contrato y de salario\r\n \r\n \"\"\"\r\n\r\n #Quitamos espacios a texto de columna TIPO SALARIO:\r\n df_revision[\"TIPO DE SALARIO \"]=df_revision[\"TIPO DE SALARIO \"].replace(\"PLENO\",\"ORDINARIO\")\r\n df_revision[\"TIPO DE SALARIO \"]=df_revision[\"TIPO DE SALARIO \"].str.strip()\r\n\r\n #Agregar % que hacen falta del personal nuevo:\r\n\r\n #cambios a salario flexible\r\n\r\n df_revision.loc[(df_revision[\"%AL 100\"].isna()) & (df_revision[\"TIPO DE SALARIO \"]==\"FLEXIBLE\"),\"%AL 100\"]=1.0\r\n df_revision.loc[(df_revision[\"%SALARIO BASICO \"].isna()) & (df_revision[\"TIPO DE SALARIO \"]==\"FLEXIBLE\"),\"%SALARIO BASICO \"]=0.66\r\n df_revision.loc[(df_revision[\"% FLI \"].isna()) & (df_revision[\"TIPO DE SALARIO 
\"]==\"FLEXIBLE\"),'% FLI ']=0.34\r\n\r\n #cambios a salario ordinario\r\n\r\n df_revision.loc[(df_revision[\"%AL 100\"].isna()) & (df_revision[\"TIPO DE SALARIO \"]==\"ORDINARIO\"),\"%AL 100\"]=1.0\r\n df_revision.loc[(df_revision[\"%SALARIO BASICO \"].isna()) & (df_revision[\"TIPO DE SALARIO \"]==\"ORDINARIO\"),\"%SALARIO BASICO \"]=1.0\r\n df_revision.loc[(df_revision[\"% FLI \"].isna()) & (df_revision[\"TIPO DE SALARIO \"]==\"ORDINARIO\"),'% FLI ']=0.0\r\n\r\n #cambios a sostenimiento aprendiz\r\n\r\n df_revision.loc[(df_revision[\"%AL 100\"].isna()) & (df_revision[\"TIPO DE SALARIO \"]==\"APOYO SOSTENIMIENTO\"),\"%AL 100\"]=1.0\r\n df_revision.loc[(df_revision[\"%SALARIO BASICO \"].isna()) & (df_revision[\"TIPO DE SALARIO \"]==\"APOYO SOSTENIMIENTO\"),\"%SALARIO BASICO \"]=1.0\r\n df_revision.loc[(df_revision[\"% FLI \"].isna()) & (df_revision[\"TIPO DE SALARIO \"]==\"APOYO SOSTENIMIENTO\"),'% FLI ']=0.0\r\n\r\n #cambios a salario flexible integral\r\n\r\n df_revision.loc[(df_revision[\"TIPO\"]==\"INTEGRAL\") & (df_revision[\"TIPO DE SALARIO \"]==\"FLEXIBLE\"),\"%AL 100\"]=1.0\r\n df_revision.loc[(df_revision[\"TIPO\"]==\"INTEGRAL\") & (df_revision[\"TIPO DE SALARIO \"]==\"FLEXIBLE\"),\"%SALARIO BASICO \"]=0.9\r\n df_revision.loc[(df_revision[\"TIPO\"]==\"INTEGRAL\") & (df_revision[\"TIPO DE SALARIO \"]==\"FLEXIBLE\"),'% FLI ']=0.1\r\n\r\n df_revision.loc[(df_revision[\"TIPO\"]==\"INTEGRAL\") & (df_revision[\"TIPO DE SALARIO \"]==\"FLI\"),\"%AL 100\"]=1.0\r\n df_revision.loc[(df_revision[\"TIPO\"]==\"INTEGRAL\") & (df_revision[\"TIPO DE SALARIO \"]==\"FLI\"),\"%SALARIO BASICO \"]=0.9\r\n df_revision.loc[(df_revision[\"TIPO\"]==\"INTEGRAL\") & (df_revision[\"TIPO DE SALARIO \"]==\"FLI\"),'% FLI ']=0.1\r\n \r\n df_revision.loc[df_revision[\"%AL 100\"]==0,\"%AL 100\"]=1.0\r\n\r\n\r\n return df_revision\r\n\r\n#CREA ARCHIVO PARA OPERAR:\r\ndef crea_archivo_pruebas(path_archivo_base,df_revision):\r\n df_base=pd.read_excel(path_archivo_base)\r\n df_base[\"ARCHIVO\"]=\"BASE\"\r\n\r\n df_revision[\"ARCHIVO\"]=\"REVISION\"\r\n\r\n #Append al archivo ya organizado del nuevo archivo a revisar:\r\n df_pruebas=pd.concat([df_base,df_revision]).fillna(0)\r\n df_pruebas=df_pruebas[df_pruebas[\"ARCHIVO\"]==\"REVISION\"].drop(columns=\"ARCHIVO\")\r\n\r\n return df_pruebas\r\n\r\n#Funciones MAIN\r\ndef compila_archivo(path_nom_hori,path_maestro,path_revi_nomi,path_novedades,path_archivo_base):\r\n\r\n \"\"\"\r\n Esta función integra todas las funciones que realizan cambios y uniones de archivos y retorna \\n\r\n un solo DataFrame con el formato final listo para realizar los cáldulos de conceptos. 
\r\n \"\"\"\r\n\r\n global df_revision\r\n\r\n df_revision=format_nomina_horizontal(path_nom_hori).merge(maestro(path_maestro),on=\"CC\",how=\"left\")\r\n df_revision.rename(columns={\"codigo_empleado\":\"CODIGO\",\"fecha_ingreso_contrato\":\"FECHA DE INGRESO\"},inplace=True)\r\n \r\n \r\n \r\n df_revision=df_revision.merge(rev_nom_anterior(path_revi_nomi),on=\"CC\",how=\"left\")\r\n df_revision['SALARIO TOTAL']=df_revision['SALARIO TOTAL'].astype(float)\r\n \r\n \r\n \r\n df_revision=pd.concat([novedades_nomina(path_novedades,df_revision)[0],novedades_nomina(path_novedades,df_revision)[1]]).reset_index(drop=True)\r\n\r\n\r\n \r\n df_revision=pd.concat([ingresos(path_novedades,df_revision)[0],ingresos(path_novedades,df_revision)[1]]).reset_index(drop=True)\r\n\r\n \r\n df_revision=crea_archivo_pruebas(path_archivo_base,distribucion_salario(df_revision))\r\n \r\n\r\n return df_revision\r\n\r\ndef generarRuta(fecha_corte,rutaInput):\r\n \r\n\r\n raiz = rutaInput\r\n rutaUser = raiz[0:raiz[9:len(raiz)].index(\"/\")+9]\r\n\r\n #ruta = rutaUser+'/MVM Ingenieria de Software/Unidad de Sostenibilidad y Crecimiento - Documentos/Confidencial/Privada/Gestión Financiera/Balanc/2022/Control Interno/Nomina/'+ fecha_corte[:10].split(\"-\")[1]+'/REVISIONES/Revision pago de nomina '+fecha_corte.replace(\"-\",\"\")+'.xlsx'\r\n #ruta = rutaUser+'/MVM Ingenieria de Software/Unidad de Sostenibilidad y Crecimiento - Balanc/'+ fecha_corte[:4]+'/Control Interno/Nomina/'+ fecha_corte[:10].split(\"-\")[1]+'/REVISIONES/Revisión preliminar de nómina '+fecha_corte.replace(\"-\",\"\")+'.xlsx'\r\n ruta = '.\\\\resultados\\\\Revision prueba-2.xlsx'\r\n return ruta\r\n\r\ndef calculo_conceptos(path_novedades,path_facturacion,df_pruebas):\r\n\r\n \r\n \r\n\r\n \"\"\"\r\n Esta función calcula sobre el archivo base los diferentes conceptos de nómina por cada empleado. 
\\n\r\n Adicionalmente, crea una columna de revisión por cada concepto con el objetivo de encontrar errores.\r\n\r\n \"\"\"\r\n\r\n #Leer archivo 14 para extraer información necesaria:\r\n xls = pd.ExcelFile(path_novedades)\r\n\r\n # archivo disponibilidades:\r\n df14_1=xls.parse(2,header=3).iloc[:,[0,1,5,6,7,8]]\r\n df14_1.rename(columns={df14_1.columns[0]:\"CC\",df14_1.columns[1]:\"NOMBRE\"},inplace=True)\r\n\r\n ## archivo horas adicionales\r\n df14_horas=df14_1.iloc[:,0:4].rename(columns={df14_1.columns[2]:\"CANTIDADES\",df14_1.columns[3]:\"VALOR CUOTA\"})\r\n df14_horas[\"VALOR CUOTA\"]=df14_horas[\"VALOR CUOTA\"].astype(float)\r\n df14_horas=df14_horas.groupby([\"CC\"])[[\"VALOR CUOTA\"]].sum().reset_index()\r\n\r\n ## archivo dias adicionales\r\n df14_dias=df14_1.iloc[:,[0,1,4,5]].rename(columns={df14_1.columns[5]:\"VALOR CUOTA\"})\r\n df14_dias[\"VALOR CUOTA\"]=df14_dias[\"VALOR CUOTA\"].astype(float)\r\n df14_dias=df14_dias.groupby([\"CC\"])[[\"VALOR CUOTA\"]].sum().reset_index()\r\n\r\n # reembolso gastos\r\n df14_2=xls.parse(3,header=4).iloc[:,[0,1,7,9]]\r\n df14_2.rename(columns={df14_2.columns[0]:\"CC\",df14_2.columns[1]:\"NOMBRE\",\"VALOR\":\"VALOR CUOTA\"},inplace=True)\r\n df14_2[\"VALOR CUOTA\"]=df14_2[\"VALOR CUOTA\"].astype(float)\r\n df14_2=df14_2[df14_2[\"DESCRIPCIÓN\"]==\"REEMBOLSO DE GASTOS \"]\r\n df_reembolso=df14_2.iloc[:,[0,3]]\r\n\r\n\r\n # Insertamos columnas vacías para agregar información y formulas\r\n df_pruebas.insert(14, 'SALARIO BASICO ', np.nan)\r\n df_pruebas.insert(15, 'SALARIO FLI ', np.nan)\r\n df_pruebas.insert(16, 'VALIDACIÓN SALARIO', np.nan)\r\n df_pruebas.insert(22, 'DIA DE LA FAMILIA ', np.nan)\r\n df_pruebas.insert(33, \"REVISION DÍAS\", np.nan)\r\n df_pruebas.insert(34, \"OBSERVACIONES\", np.nan)\r\n df_pruebas.insert(36, \"REVISIÓN 0010\", np.nan)\r\n df_pruebas.insert(38, \"REVISIÓN 0015\", np.nan)\r\n df_pruebas.insert(40, \"REVISIÓN 0024\", np.nan)\r\n df_pruebas.insert(42, \"REVISIÓN 0025\", np.nan)\r\n df_pruebas.insert(46, \"VALOR DISP HORAS\", list(df_pruebas.merge(df14_horas,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(47, \"REVISIÓN 0316\", np.nan)\r\n df_pruebas.insert(49, \"VALOR DISP DIAS\", list(df_pruebas.merge(df14_dias,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(50, \"REVISIÓN 0317\", np.nan)\r\n\r\n\r\n ## 0340 Leemos el archivo 13, antes de esto debemos abrirlo y cambiar el formato de la hoja de cargue tarjetas\r\n xls = pd.ExcelFile(path_facturacion)\r\n df_14=xls.parse(19,header=1)\r\n df_14=df_14.iloc[:,[1,2,4,7]].dropna()\r\n df_14.rename(columns={df_14.columns[0]:\"CC\",df_14.columns[1]:\"NOMBRE\",df_14.columns[3]:\"VALOR CUOTA\",\r\n df_14.columns[2]:\"COD CONCEPTO\"},inplace=True)\r\n df_14[\"COD CONCEPTO\"]=df_14[\"COD CONCEPTO\"].astype(\"int64\")\r\n df_14=df_14.groupby([\"CC\",\"NOMBRE\",\"COD CONCEPTO\"])[[\"VALOR CUOTA\"]].sum().reset_index()\r\n df_14=df_14[df_14[\"VALOR CUOTA\"]>0].reset_index(drop=True)\r\n\r\n #Conservamos solo tarjetas de alimentación y columnas utiles\r\n df_alim=df_14[df_14[\"COD CONCEPTO\"]==340].iloc[:,[0,3]]\r\n\r\n #Conservamos solo tarjetas de gasolina y columnas utiles\r\n df_gaso=df_14[df_14[\"COD CONCEPTO\"]==1953].iloc[:,[0,3]]\r\n\r\n #Libranza Davivienda:\r\n df_19=xls.parse(11).iloc[:,[0,1]].dropna()\r\n df_19.rename(columns={df_19.columns[0]:\"CC\",df_19.columns[1]:\"VALOR CUOTA\"},inplace=True)\r\n df_19[\"COD CONCEPTO\"]=2704\r\n df_19[\"CONCEPTO\"]=\"LIBRANZA DAVIVIENDA\"\r\n 
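    # The groupby(...).sum() below is the consolidation pattern used throughout
    # this function: several installments for the same employee collapse into a
    # single row per CC. Illustrative sketch (hypothetical data, not taken from
    # the real payroll report):
    #   pd.DataFrame({'CC': [1, 1], 'VALOR CUOTA': [10.0, 20.0]}).groupby('CC')[['VALOR CUOTA']].sum()
    #   # -> a single row with CC=1 and VALOR CUOTA=30.0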
df_19=df_19.groupby([df_19.columns[0],df_19.columns[2],df_19.columns[3]])[[df_19.columns[1]]].sum().reset_index()\r\n\r\n #Libranza Bancolombia:\r\n df_20=xls.parse(13).iloc[:,[0,1]].dropna()\r\n df_20.rename(columns={df_20.columns[0]:\"CC\",df_20.columns[1]:\"VALOR CUOTA\"},inplace=True)\r\n df_20[\"COD CONCEPTO\"]=2705\r\n df_20[\"CONCEPTO\"]=\"LIBRANZA BANCOLOMBIA\"\r\n df_20=df_20.groupby([df_20.columns[0],df_20.columns[2],df_20.columns[3]])[[df_20.columns[1]]].sum().reset_index()\r\n\r\n #Función para hojas del archivo 13:\r\n def format13(n,codigo,concepto,head):\r\n df=xls.parse(n,header=head).iloc[:,0:3].dropna()\r\n df.rename(columns={df.columns[0]:\"CC\",df.columns[1]:\"NOMBRE\",df.columns[2]:\"VALOR CUOTA\"},inplace=True)\r\n df[\"COD CONCEPTO\"]=codigo\r\n df[\"CONCEPTO\"]=concepto\r\n df=df.groupby([df.columns[0],df.columns[1],df.columns[3],df.columns[4]])[[df.columns[2]]].sum().reset_index()\r\n df\r\n return df\r\n\r\n #Prestamos comfama:\r\n df_10=format13(15,2709,\"PRESTAMO COMFAMA\",0)\r\n\r\n #Servicios COMFAMA:\r\n df_22=xls.parse(24,header=0).iloc[:,[0,1,5]].dropna()\r\n df_22.rename(columns={df_22.columns[0]:\"CC\",df_22.columns[1]:\"NOMBRE\",df_22.columns[2]:\"VALOR CUOTA\"},inplace=True)\r\n df_22[\"COD CONCEPTO\"]=2710\r\n df_22[\"CONCEPTO\"]=\"SERVICIOS COMFAMA MATRICULAS\"\r\n df_22=df_22.groupby([df_22.columns[0],df_22.columns[1],df_22.columns[3],df_22.columns[4]])[[df_22.columns[2]]].sum().reset_index()\r\n\r\n #Descuento EMI:\r\n df_9=xls.parse(10).iloc[:,[0,1,4]].dropna()\r\n df_9.rename(columns={df_9.columns[0]:\"CC\",df_9.columns[1]:\"NOMBRE\",df_9.columns[2]:\"VALOR CUOTA\"},inplace=True)\r\n df_9[\"COD CONCEPTO\"]=2712\r\n df_9[\"CONCEPTO\"]=\"DESCUENTO EMI\"\r\n df_9=df_9.groupby([df_9.columns[0],df_9.columns[1],df_9.columns[3],df_9.columns[4]])[[df_9.columns[2]]].sum().reset_index()\r\n\r\n #Descuento PREVER:\r\n df_12=format13(17,2713,\"DESCUENTO PREVER\",0)\r\n\r\n #POLIZA AUTO\r\n df_6=format13(5,2714,\"POLIZA DE AUTO\",0)\r\n\r\n #POLIZA DE VIDA:\r\n df_7=format13(7,2715,\"POLIZA DE VIDA\",0)\r\n\r\n #movistar:\r\n df_11=xls.parse(16,header=1).iloc[:,[0,2,3]].dropna()\r\n df_11.rename(columns={df_11.columns[0]:\"CC\",df_11.columns[1]:\"NOMBRE\",df_11.columns[2]:\"VALOR CUOTA\"},inplace=True)\r\n df_11[\"COD CONCEPTO\"]=2724\r\n df_11[\"CONCEPTO\"]=\"DESCUENTO MOVISTAR\"\r\n df_11=df_11.groupby([df_11.columns[0],df_11.columns[1],df_11.columns[3],df_11.columns[4]])[[df_11.columns[2]]].sum().reset_index()\r\n\r\n #Poliza SURA PAC\r\n df_8=format13(8,2737,\"POLIZA SURA PAC\",0)\r\n\r\n #AHORRO FEMTI:\r\n df_21=xls.parse(31).iloc[:,[0,1,3]].dropna()\r\n df_21.rename(columns={df_21.columns[0]:\"CC\",df_21.columns[1]:\"NOMBRE\",df_21.columns[2]:\"VALOR CUOTA\"},inplace=True)\r\n df_21[\"COD CONCEPTO\"]=2739\r\n df_21[\"CONCEPTO\"]=\"AHORRO FEMTI\"\r\n df_21=df_21.groupby([df_21.columns[0],df_21.columns[1],df_21.columns[3],df_21.columns[4]])[[df_21.columns[2]]].sum().reset_index()\r\n\r\n #APORTE VOLUNTARIO:\r\n df_18=format13(30,3210,\"APORTE VOLUNTARIO A PENSION VOLUNTARIA\",0)\r\n\r\n #APORTE AFC:\r\n df_17=format13(29,3213,\"APORTE AFC\",0)\r\n\r\n #PREPAGADA COLSANITAS\r\n df_1=format13(0,3602,\"DESCUENTO MEDICINA PREPAGADA COLSANITAS\",0)\r\n\r\n #PREPAGADA MEDISANITAS\r\n df_2=format13(1,3604,\"DESCUENTO MEDICINA PREPAGADA MEDISANITAS\",0)\r\n\r\n #MEDICINA PREPAGADA SURA - GLOBAL\r\n df_4=format13(3,3607,\"MEDICINA PREPAGADA SURA - GLOBAL\",0)\r\n\r\n #MEDICINA PREPAGADA SURA - ESPECIAL\r\n df_3=format13(2,3608,\"MEDICINA PREPAGADA SURA - 
ESPECIAL\",0)\r\n\r\n #MEDICINA PREPAGADA SURA - CLASICA\r\n df_5=format13(4,3609,\"MEDICINA PREPAGADA SURA - CLASICA\",0)\r\n\r\n\r\n #MEDICINA PREPAGADA COOMEVA\r\n df_16=format13(22,3611,\"DESCUENTO MEDICINA PREPAGADA COOMEVA\",0)\r\n\r\n # Insert an empty column to write the formulas\r\n df_pruebas.insert(52, \"VALOR ALIMENTACIÓN\", list(df_pruebas.merge(df_alim,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(53, \"REVISIÓN 0340\", np.nan)\r\n df_pruebas.insert(55, \"REVISIÓN 0740\", np.nan)\r\n df_pruebas.insert(57, \"REVISIÓN 0791\", np.nan)\r\n df_pruebas.insert(59, \"REVISIÓN 0920\", np.nan)\r\n df_pruebas.insert(61, \"REVISIÓN 0923\", np.nan)\r\n df_pruebas.insert(63, \"VALOR REEMBOLSO GASTOS\", list(df_pruebas.merge(df_reembolso,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(64, \"REVISIÓN 1200\", np.nan)\r\n df_pruebas.insert(66, \"REVISIÓN 1600\", np.nan)\r\n df_pruebas.insert(68, \"REVISIÓN 1670\", np.nan)\r\n df_pruebas.insert(70, \"REVISIÓN 1671\", np.nan)\r\n df_pruebas.insert(72, \"REVISIÓN 1673\", np.nan)\r\n df_pruebas.insert(74, \"REVISIÓN 1730\", np.nan)\r\n df_pruebas.insert(77, \"REVISIÓN 1950\", np.nan)\r\n df_pruebas.insert(79, \"VALOR GASOLINA\", list(df_pruebas.merge(df_gaso,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(80, \"REVISIÓN 1953\", np.nan)\r\n df_pruebas.insert(82, \"REVISIÓN 1954\", np.nan)\r\n df_pruebas.insert(84, \"REVISIÓN 1956\", np.nan)\r\n df_pruebas.insert(86, \"REVISIÓN 1957\", np.nan)\r\n df_pruebas.insert(88, \"REVISIÓN 1970\", np.nan)\r\n df_pruebas.insert(90, \"REVISIÓN 1971\", np.nan)\r\n df_pruebas.insert(93, \"REVISIÓN 2500\", np.nan)\r\n df_pruebas.insert(95, \"REVISIÓN 2510\", np.nan)\r\n df_pruebas.insert(98, \"REVISIÓN 2520\", np.nan)\r\n df_pruebas.insert(100, \"VALOR LIBRANZA DAVIVIENDA\", list(df_pruebas.merge(df_19,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(101, \"REVISIÓN 2704\", np.nan)\r\n df_pruebas.insert(103, \"VALOR LIBRANZA BANCOLOMBIA\", list(df_pruebas.merge(df_20,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(104, \"REVISIÓN 2705\", np.nan)\r\n df_pruebas.insert(106, \"VALOR PRESTAMOS COMFAMA\", list(df_pruebas.merge(df_10,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(107, \"REVISIÓN 2709\", np.nan)\r\n df_pruebas.insert(109, \"VALOR SERVICIOS COMFAMA\", list(df_pruebas.merge(df_22,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(110, \"REVISIÓN 2710\", np.nan)\r\n df_pruebas.insert(112, \"VALOR DESCUENTO EMI\", list(df_pruebas.merge(df_9,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(113, \"REVISIÓN 2712\", np.nan)\r\n df_pruebas.insert(115, \"VALOR DESCUENTO PREVER\", list(df_pruebas.merge(df_12,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(116, \"REVISIÓN 2713\", np.nan)\r\n df_pruebas.insert(118, \"VALOR POLIZA AUTO\", list(df_pruebas.merge(df_6,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(119, \"REVISIÓN 2714\", np.nan)\r\n df_pruebas.insert(121, \"VALOR POLIZA VIDA\", list(df_pruebas.merge(df_7,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(122, \"REVISIÓN 2715\", np.nan)\r\n df_pruebas.insert(124, \"VALOR DESCUENTO MOVISTAR\", list(df_pruebas.merge(df_11,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(125, \"REVISIÓN 2724\", np.nan)\r\n df_pruebas.insert(127, \"VALOR POLIZA 
SURA PAC\", list(df_pruebas.merge(df_8,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(128, \"REVISIÓN 2737\", np.nan)\r\n df_pruebas.insert(130, \"VALOR AHORRO FEMTI\", list(df_pruebas.merge(df_21,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(131, \"REVISIÓN 2739\", np.nan)\r\n df_pruebas.insert(133, \"VALOR APORTE VOLUNTARIO\", list(df_pruebas.merge(df_18,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(134, \"REVISIÓN 3210\", np.nan)\r\n df_pruebas.insert(136, \"VALOR APORTE AFC\", list(df_pruebas.merge(df_17,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(137, \"REVISIÓN 3213\", np.nan)\r\n df_pruebas.insert(139, \"REVISIÓN 3222\", np.nan)\r\n df_pruebas.insert(141, \"VALOR PREPAGADA COLSANITAS\", list(df_pruebas.merge(df_1,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(142, \"REVISIÓN 3602\", np.nan)\r\n df_pruebas.insert(144, \"VALOR PREPAGADA MEDISANITAS\", list(df_pruebas.merge(df_2,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(145, \"REVISIÓN 3604\", np.nan)\r\n df_pruebas.insert(147, \"VALOR PREPAGADA SURA GLOBAL\", list(df_pruebas.merge(df_4,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(148, \"REVISIÓN 3607\", np.nan)\r\n df_pruebas.insert(150, \"VALOR PREPAGADA SURA ESPECIAL\", list(df_pruebas.merge(df_3,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(151, \"REVISIÓN 3608\", np.nan)\r\n df_pruebas.insert(153, \"VALOR PREPAGADA SURA CLASICA\", list(df_pruebas.merge(df_5,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(154, \"REVISIÓN 3609\", np.nan)\r\n df_pruebas.insert(156, \"VALOR PREPAGADA COOMEVA\", list(df_pruebas.merge(df_16,on=\"CC\",how=\"left\")[\"VALOR CUOTA\"].fillna(0)))\r\n df_pruebas.insert(157, \"REVISIÓN 3611\", np.nan)\r\n df_pruebas.insert(159, \"REVISIÓN 3900\", np.nan)\r\n df_pruebas.insert(161, \"REVISIÓN 3901\", np.nan)\r\n df_pruebas.insert(163, \"REVISIÓN 3903\", np.nan)\r\n df_pruebas.insert(166, \"REVISIÓN 3920\", np.nan)\r\n \r\n global merged_data,df_IBC,dftest,dfinicial,df_merged\r\n \r\n dfinicial = df_pruebas\r\n mes = fecha.split('-')[1]\r\n df_IBC = agregar_columna_IBC(mes)\r\n merged_data = pd.merge(df_IBC, df_pruebas, on='CC')\r\n # df_pruebas = df_pruebas.join(merged_data['IBC'])\r\n dftest = df_pruebas\r\n \r\n # ----INICIO CODIGO DE MERGE FUNCIONANDO\r\n # Unir los dataframes usando el método merge y especificar que es unir por la columna 'CC'\r\n df_merged = pd.merge(dfinicial, df_IBC[['CC', 'IBC']], on='CC', how='left')\r\n\r\n # Reemplazar los valores NaN, es decir los registros que no están en df1, con el valor 99\r\n df_merged['IBC'] = df_merged['IBC'].fillna(0)\r\n \r\n #Eliminar comas y convertir a np.float\r\n df_merged['IBC'] = df_merged['IBC'].replace(',', '', regex=True).astype(np.float64)\r\n # ----FIN CODIGO DE MERGE FUNCIONANDO\r\n \r\n # Start the xlsxwriter\r\n #writer = pd.ExcelWriter('resultados/NOMINA/2022/Formato revisión nómina'+mes+'3.xlsx', engine='xlsxwriter')\r\n #writer = pd.ExcelWriter(ruta_guardar+'/Formato revisión preliminar de nómina.xlsx', engine='xlsxwriter')\r\n\r\n rutaGuardarArchivos = generarRuta(fecha,ruta_nomina_horizontal)\r\n\r\n writer = pd.ExcelWriter(rutaGuardarArchivos, engine=\"xlsxwriter\")\r\n\r\n # df_pruebas.to_excel(writer, sheet_name='Hoja1', index=False)\r\n df_merged.to_excel(writer, sheet_name='Hoja1', index=False)\r\n workbook = 
writer.book\r\n worksheet = writer.sheets['Hoja1']\r\n\r\n\r\n # Create a for loop to start writing the formulas to each row\r\n global col_IBC\r\n col_IBC = num_a_col_excel(df_merged.columns.get_loc('IBC')+1)\r\n\r\n #Formulas:\r\n \r\n\r\n #Salario basico\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=N{row}*L{row}'\r\n worksheet.write_formula(f\"O{row}\", formula)\r\n\r\n #Salario fli\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=N{row}*M{row}'\r\n worksheet.write_formula(f\"P{row}\", formula)\r\n\r\n #Validación Salario\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=(O{row}+P{row})-N{row}'\r\n worksheet.write_formula(f\"Q{row}\", formula)\r\n\r\n #agregamos día de la familia:\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=IF(AN{row}=0,0,1)'\r\n worksheet.write_formula(f\"W{row}\", formula)\r\n\r\n #agregamos REVISIÓN DÍAS\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=SUM(R{row}:AF{row})-T{row}'\r\n worksheet.write_formula(f\"AH{row}\", formula)\r\n\r\n #agregamos REVISIÓN 0010\r\n for row in range(2,df_pruebas.shape[0]+2): \r\n formula = f'=IF(F{row}=\"LEY 50\",((O{row}/30)*R{row})-AJ{row},0)'\r\n worksheet.write_formula(f\"AK{row}\", formula)\r\n\r\n #agregamos REVISIÓN 0015\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=IF(F{row}=\"INTEGRAL\",((O{row}/30)*R{row})-AL{row},0)'\r\n worksheet.write_formula(f\"AM{row}\", formula)\r\n\r\n #agregamos REVISIÓN 0024\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=((O{row}/30)*W{row})-AN{row}'\r\n worksheet.write_formula(f\"AO{row}\", formula)\r\n\r\n # Agregamos REVISIÓN 0025\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=IF(F{row}=\"APRENDIZ\",((O{row}/30)*R{row})-AP{row},0)'\r\n worksheet.write_formula(f\"AQ{row}\", formula)\r\n\r\n # REVISIÓN 0316:\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=AU{row}-AT{row}'\r\n worksheet.write_formula(f\"AV{row}\", formula)\r\n\r\n #0317-ATENCION DISPONIBILIDAD DIAS:\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=AX{row}-AW{row}'\r\n worksheet.write_formula(f\"AY{row}\", formula)\r\n\r\n # 0340-ATARJETA ALIMENTACIÓN:\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=BA{row}-AZ{row}'\r\n worksheet.write_formula(f\"BB{row}\", formula)\r\n\r\n # 0740-AUXILIO EMPRESA INCAPACIDAD:\r\n # for row in range(2,df_pruebas.shape[0]+2):\r\n # formula = f'=IF(AND(S{row}<=2,F{row}<>\"INTEGRAL\"),((O{row}/30)*S{row})-BC{row},IF(AND(S{row}>2,F{row}<>\"INTEGRAL\"),((O{row}/30)*2)-BC{row},IF(AND(S{row}<=2,F{row}=\"INTEGRAL\"),(((O{row}*70%)/30)*S{row})-BC{row},IF(AND(S{row}>2,F{row}=\"INTEGRAL\"),(((O{row}*70%)/30)*2)-BC{row},0))))'\r\n # worksheet.write_formula(f\"BD{row}\", formula)\r\n\r\n\r\n # 0740-AUXILIO EMPRESA INCAPACIDAD: EDITADO LUIS PEREZ\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=IF({col_IBC}{row}>0,IF(AND(S{row}<=2,F{row}<>\"INTEGRAL\"),(({col_IBC}{row}/30)*S{row})-BC{row},IF(AND(S{row}>2,F{row}<>\"INTEGRAL\"),(({col_IBC}{row}/30)*2)-BC{row},IF(AND(S{row}<=2,F{row}=\"INTEGRAL\"),((({col_IBC}{row}*70%)/30)*S{row})-BC{row},IF(AND(S{row}>2,F{row}=\"INTEGRAL\"),((({col_IBC}{row}*70%)/30)*2)-BC{row},0)))),IF(AND(S{row}<=2,F{row}<>\"INTEGRAL\"),((O{row}/30)*S{row})-BC{row},IF(AND(S{row}>2,F{row}<>\"INTEGRAL\"),((O{row}/30)*2)-BC{row},IF(AND(S{row}<=2,F{row}=\"INTEGRAL\"),(((O{row}*70%)/30)*S{row})-BC{row},IF(AND(S{row}>2,F{row}=\"INTEGRAL\"),(((O{row}*70%)/30)*2)-BC{row},0)))))'\r\n 
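        # Reading of the edited formula above (inferred from its own structure,
        # not from external documentation): if a previous-month IBC exists
        # ({col_IBC}{row}>0) it replaces the basic salary in column O as the base;
        # the aid covers at most 2 days of incapacity (column S), and for
        # "INTEGRAL" contracts only 70% of the base is taken into account.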
worksheet.write_formula(f\"BD{row}\", formula)\r\n\r\n\r\n\r\n\r\n # 0791 AUXILIO DE CONECTIVIDAD: Solo a quienes ganen menos de 2 smmlv\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=IF(AND(O{row}<=2000000,BE{row}<>0),True,False)'\r\n worksheet.write_formula(f\"BF{row}\", formula)\r\n\r\n # 0920 AVACACIONES EN DINERO:\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=((O{row}/30)*T{row})-BG{row}'\r\n worksheet.write_formula(f\"BH{row}\", formula)\r\n\r\n # 0923-AUXILIO VACACIONES RFI: \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=((P{row}/30)*T{row})-BI{row}'\r\n worksheet.write_formula(f\"BJ{row}\", formula)\r\n\r\n # 1200-REEMBOLSO DE GASTOS: \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=BL{row}-BK{row}'\r\n worksheet.write_formula(f\"BM{row}\", formula)\r\n\r\n # 1600-ENFERMEDAD GENERAL: \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=IF(AND(S{row}>=0,S{row}<=2),0,((N{row}/30)*S{row}-2)-BN{row})'\r\n worksheet.write_formula(f\"BO{row}\", formula)\r\n\r\n #1670-LICENCIA REMUNERADA \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=((O{row}/30)*AA{row})-BP{row}'\r\n worksheet.write_formula(f\"BQ{row}\", formula)\r\n\r\n # 1671-CALAMIDAD DOMESTICA \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=((O{row}/30)*Z{row})-BR{row}'\r\n worksheet.write_formula(f\"BS{row}\", formula)\r\n\r\n # 1673-LICENCIA POR LUTO \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=((O{row}/30)*Z{row})-BT{row}'\r\n worksheet.write_formula(f\"BU{row}\", formula)\r\n\r\n # 1730-VACACIONES DISFRUTADAS \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=((O{row}/30)*AF{row})-BV{row}'\r\n worksheet.write_formula(f\"BW{row}\", formula)\r\n\r\n # 1950-VAPORTE VOLUNTARIO INST.\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=(P{row}*4%)-BY{row}'\r\n worksheet.write_formula(f\"BZ{row}\", formula)\r\n\r\n #1953- TARJETA GASOLINA RFI\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=CB{row}-CA{row}'\r\n worksheet.write_formula(f\"CC{row}\", formula)\r\n\r\n #1954- BENEFICIO PLAN\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=P{row}-AS{row}-AZ{row}-BY{row}-CA{row}-CD{row}-CF{row}-CH{row}'\r\n worksheet.write_formula(f\"CE{row}\", formula)\r\n\r\n #1956- BENEFICIO PLAN \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=P{row}-AS{row}-AZ{row}-BY{row}-CA{row}-CD{row}-CF{row}-CH{row}'\r\n worksheet.write_formula(f\"CG{row}\", formula)\r\n\r\n #1957- FLEX BEN APOR \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=P{row}-AS{row}-AZ{row}-BY{row}-CA{row}-CD{row}-CF{row}-CH{row}'\r\n worksheet.write_formula(f\"CI{row}\", formula)\r\n\r\n #1970-APORTE INSTITUCIONAL \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=(P{row}/30*R{row})*0.075-CJ{row}' \r\n worksheet.write_formula(f\"CK{row}\", formula)\r\n\r\n #1971-APORTE VOLUNTARIO PLUS \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=(P{row}/30*R{row})*0.1766-CL{row}' \r\n worksheet.write_formula(f\"CM{row}\", formula)\r\n\r\n #2500-APORTE SALUD EGM \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=IF(F{row}=\"INTEGRAL\",(AJ{row}+AL{row}+AN{row}+AT{row}+AW{row}+BC{row}+BN{row}+BP{row}+BR{row}+BT{row}+BV{row})*0.7*0.04,(AJ{row}+AL{row}+AN{row}+AT{row}+AW{row}+BC{row}+BN{row}+BP{row}+BR{row}+BT{row}+BV{row})*0.04)-CO{row}' \r\n worksheet.write_formula(f\"CP{row}\", formula)\r\n\r\n #2510-FONDO DE SOLIDARIDAD\r\n for row in 
range(2,df_pruebas.shape[0]+2):\r\n formula = f'=IF(AJ{row}>4000000,IF(F{row}=\"INTEGRAL\",(AJ{row}+AL{row}+AN{row}+AT{row}+AW{row}+BC{row}+BN{row}+BP{row}+BR{row}+BT{row}+BV{row})*0.7*0.01,(AJ{row}+AL{row}+AN{row}+AT{row}+AW{row}+BC{row}+BN{row}+BP{row}+BR{row}+BT{row}+BV{row})*0.01),0)-CQ{row}'\r\n worksheet.write_formula(f\"CR{row}\", formula)\r\n\r\n #2520-APORTES PENSION IVM\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=IF(F{row}=\"INTEGRAL\",(AJ{row}+AL{row}+AN{row}+AT{row}+AW{row}+BC{row}+BN{row}+BP{row}+BR{row}+BT{row}+BV{row})*0.7*0.04,(AJ{row}+AL{row}+AN{row}+AT{row}+AW{row}+BC{row}+BN{row}+BP{row}+BR{row}+BT{row}+BV{row})*0.04)-CT{row}'\r\n worksheet.write_formula(f\"CU{row}\", formula)\r\n\r\n #2704-LIBRANZA DAVIVIENDA\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=CW{row}-CV{row}'\r\n worksheet.write_formula(f\"CX{row}\", formula)\r\n\r\n #2705-LIBRANZA BANCOLOMBIA\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=CZ{row}-CY{row}'\r\n worksheet.write_formula(f\"DA{row}\", formula)\r\n\r\n #2709-PRESTAMO COMFAMA\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=DC{row}-DB{row}'\r\n worksheet.write_formula(f\"DD{row}\", formula)\r\n\r\n #2710-SERVICIOS COMFAMA\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=DF{row}-DE{row}'\r\n worksheet.write_formula(f\"DG{row}\", formula)\r\n\r\n #2712-DESCUENTO EMI\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=DI{row}-DH{row}'\r\n worksheet.write_formula(f\"DJ{row}\", formula)\r\n\r\n #2713-DESCUENTO PREVER\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=DL{row}-DK{row}'\r\n worksheet.write_formula(f\"DM{row}\", formula)\r\n\r\n #2714-POLIZA DE AUTO\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=DO{row}-DN{row}'\r\n worksheet.write_formula(f\"DP{row}\", formula)\r\n\r\n #2715-POLIZA DE VIDA\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=DR{row}-DQ{row}'\r\n worksheet.write_formula(f\"DS{row}\", formula)\r\n\r\n #2724-DESCUENTO MOVISTAR\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=DU{row}-DT{row}'\r\n worksheet.write_formula(f\"DV{row}\", formula)\r\n\r\n #2737-POLIZA SURA PAC\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=DX{row}-DW{row}'\r\n worksheet.write_formula(f\"DY{row}\", formula)\r\n\r\n #2739-AHORRO FEMTI\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=EA{row}-DZ{row}'\r\n worksheet.write_formula(f\"EB{row}\", formula)\r\n\r\n #3210-APORTE VOLUNTARIO A\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=ED{row}-EC{row}'\r\n worksheet.write_formula(f\"EE{row}\", formula)\r\n\r\n #3213-APORTE AFC\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=EG{row}-EF{row}'\r\n worksheet.write_formula(f\"EH{row}\", formula)\r\n\r\n #3222-APORTE VOLUNTARIO PLUS\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=CL{row}-EI{row}'\r\n worksheet.write_formula(f\"EJ{row}\", formula)\r\n\r\n #3602-DESCUENTO MEDICINA COLSANITAS\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=EL{row}-EK{row}'\r\n worksheet.write_formula(f\"EM{row}\", formula)\r\n\r\n #3604-DESCUENTO MEDICINA MEDISANITAS\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=EO{row}-EN{row}'\r\n worksheet.write_formula(f\"EP{row}\", formula)\r\n\r\n #3607-MEDICINA PREPAGADA SURA - GLOBAL\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=ER{row}-EQ{row}'\r\n worksheet.write_formula(f\"ES{row}\", formula)\r\n\r\n 
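    # Note: every remaining check follows the same '=LIVE-EXPECTED' two-column
    # difference shape. A table-driven sketch (hypothetical refactor, not part
    # of the original script) could replace the repeated loops:
    #   for target, live, expected in [('EV', 'EU', 'ET'), ('EY', 'EX', 'EW'), ('FB', 'FA', 'EZ')]:
    #       for row in range(2, df_pruebas.shape[0] + 2):
    #           worksheet.write_formula(f'{target}{row}', f'={live}{row}-{expected}{row}')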
#3608-MEDICINA PREPAGADA SURA - ESPECIAL\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=EU{row}-ET{row}'\r\n worksheet.write_formula(f\"EV{row}\", formula)\r\n\r\n #3609-MEDICINA PREPAGADA SURA - CLASICA\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=EX{row}-EW{row}'\r\n worksheet.write_formula(f\"EY{row}\", formula)\r\n\r\n #3611-MEDICINA PREPAGADA COOMEVA\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=FA{row}-EZ{row}'\r\n worksheet.write_formula(f\"FB{row}\", formula)\r\n\r\n #3900-APORTE VOLUNTARIO INST. \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=BY{row}-FC{row}'\r\n worksheet.write_formula(f\"FD{row}\", formula)\r\n\r\n #3901-TARJETA ALIMENTACIÓN \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=AZ{row}-FE{row}'\r\n worksheet.write_formula(f\"FF{row}\", formula)\r\n\r\n #3903-TARJETA GASOLINA RFI\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=CA{row}-FG{row}'\r\n worksheet.write_formula(f\"FH{row}\", formula)\r\n\r\n #3920-APORTE INSTITUCIONAL\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=CJ{row}-FJ{row}'\r\n worksheet.write_formula(f\"FK{row}\", formula)\r\n\r\n\r\n writer.save()\r\n writer.close()\r\n \r\n #calcular_0740()\r\n \r\n#----EDITADO POR LUIS ALEJANDRO PÉREZ ABRIL-----------------------------------#\r\n#----REVISIÓN DIFERENCIAS INCAPACIDADES---------------------------------------#\r\n\r\n#Este método evaluará de nuevo el concepto 0740, atenderá los casos que hayan \r\n#tenido aumento de salario \r\ndef leer_RevPreliminar():\r\n \r\n \r\n xls = pd.ExcelFile('.\\\\resultados\\Revision prueba-2.xlsm')\r\n\r\n df14_16=xls.parse(0,header=0)\r\n \r\n \r\n return df14_16\r\n\r\ndef leer_RevPreliminarxlsx():\r\n \r\n \r\n xls = pd.ExcelFile('.\\\\resultados\\Revision prueba-2.xlsx')\r\n\r\n df14_16=xls.parse(0,header=0)\r\n \r\n \r\n return df14_16\r\n\r\ndef leer_NovedadesNomina():\r\n \r\n \r\n xls = pd.ExcelFile('.\\\\resultados\\\\14.NOVEDADES DE NOMINA 20230201.xlsm')\r\n \r\n df = pd.read_excel(xls, sheet_name='VARIACIÓN SALARIO', header=2)\r\n\r\n \r\n \r\n return df\r\n\r\ndef agregar_formulas(df_pruebas,worksheet,writer):\r\n \r\n #Formulas:\r\n\r\n #Salario basico\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=N{row}*L{row}'\r\n worksheet.write_formula(f\"O{row}\", formula)\r\n\r\n #Salario fli\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=N{row}*M{row}'\r\n worksheet.write_formula(f\"P{row}\", formula)\r\n\r\n #Validación Salario\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=(O{row}+P{row})-N{row}'\r\n worksheet.write_formula(f\"Q{row}\", formula)\r\n\r\n #agregamos día de la familia:\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=IF(AN{row}=0,0,1)'\r\n worksheet.write_formula(f\"W{row}\", formula)\r\n\r\n #agregamos REVISIÓN DÍAS\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=SUM(R{row}:AF{row})-T{row}'\r\n worksheet.write_formula(f\"AH{row}\", formula)\r\n\r\n #agregamos REVISIÓN 0010\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=IF(F{row}=\"LEY 50\",((O{row}/30)*R{row})-AJ{row},0)'\r\n worksheet.write_formula(f\"AK{row}\", formula)\r\n\r\n #agregamos REVISIÓN 0015\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=IF(F{row}=\"INTEGRAL\",((O{row}/30)*R{row})-AL{row},0)'\r\n worksheet.write_formula(f\"AM{row}\", formula)\r\n\r\n #agregamos REVISIÓN 0024\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = 
f'=((O{row}/30)*W{row})-AN{row}'\r\n worksheet.write_formula(f\"AO{row}\", formula)\r\n\r\n # Agregamos REVISIÓN 0025\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=IF(F{row}=\"APRENDIZ\",((O{row}/30)*R{row})-AP{row},0)'\r\n worksheet.write_formula(f\"AQ{row}\", formula)\r\n\r\n # REVISIÓN 0316:\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=AU{row}-AT{row}'\r\n worksheet.write_formula(f\"AV{row}\", formula)\r\n\r\n #0317-ATENCION DISPONIBILIDAD DIAS:\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=AX{row}-AW{row}'\r\n worksheet.write_formula(f\"AY{row}\", formula)\r\n\r\n # 0340-ATARJETA ALIMENTACIÓN:\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=BA{row}-AZ{row}'\r\n worksheet.write_formula(f\"BB{row}\", formula)\r\n\r\n # 0740-AUXILIO EMPRESA INCAPACIDAD:\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=IF(AND(S{row}<=2,F{row}<>\"INTEGRAL\"),((O{row}/30)*S{row})-BC{row},IF(AND(S{row}>2,F{row}<>\"INTEGRAL\"),((O{row}/30)*2)-BC{row},IF(AND(S{row}<=2,F{row}=\"INTEGRAL\"),(((O{row}*70%)/30)*S{row})-BC{row},IF(AND(S{row}>2,F{row}=\"INTEGRAL\"),(((O{row}*70%)/30)*2)-BC{row},0))))'\r\n worksheet.write_formula(f\"BD{row}\", formula)\r\n\r\n # 0791 AUXILIO DE CONECTIVIDAD: Solo a quienes ganen menos de 2 smmlv\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=IF(AND(O{row}<=2000000,BE{row}<>0),True,False)'\r\n worksheet.write_formula(f\"BF{row}\", formula)\r\n\r\n # 0920 AVACACIONES EN DINERO:\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=((O{row}/30)*T{row})-BG{row}'\r\n worksheet.write_formula(f\"BH{row}\", formula)\r\n\r\n # 0923-AUXILIO VACACIONES RFI: \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=((P{row}/30)*T{row})-BI{row}'\r\n worksheet.write_formula(f\"BJ{row}\", formula)\r\n\r\n # 1200-REEMBOLSO DE GASTOS: \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=BL{row}-BK{row}'\r\n worksheet.write_formula(f\"BM{row}\", formula)\r\n\r\n # 1600-ENFERMEDAD GENERAL: \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=IF(AND(S{row}>=0,S{row}<=2),0,((N{row}/30)*S{row}-2)-BN{row})'\r\n worksheet.write_formula(f\"BO{row}\", formula)\r\n\r\n #1670-LICENCIA REMUNERADA \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=((O{row}/30)*AA{row})-BP{row}'\r\n worksheet.write_formula(f\"BQ{row}\", formula)\r\n\r\n # 1671-CALAMIDAD DOMESTICA \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=((O{row}/30)*Z{row})-BR{row}'\r\n worksheet.write_formula(f\"BS{row}\", formula)\r\n\r\n # 1673-LICENCIA POR LUTO \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=((O{row}/30)*Z{row})-BT{row}'\r\n worksheet.write_formula(f\"BU{row}\", formula)\r\n\r\n # 1730-VACACIONES DISFRUTADAS \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=((O{row}/30)*AF{row})-BV{row}'\r\n worksheet.write_formula(f\"BW{row}\", formula)\r\n\r\n # 1950-VAPORTE VOLUNTARIO INST.\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=(P{row}*4%)-BY{row}'\r\n worksheet.write_formula(f\"BZ{row}\", formula)\r\n\r\n #1953- TARJETA GASOLINA RFI\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=CB{row}-CA{row}'\r\n worksheet.write_formula(f\"CC{row}\", formula)\r\n\r\n #1954- BENEFICIO PLAN\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=P{row}-AS{row}-AZ{row}-BY{row}-CA{row}-CD{row}-CF{row}-CH{row}'\r\n worksheet.write_formula(f\"CE{row}\", formula)\r\n\r\n #1956- BENEFICIO PLAN \r\n for row in 
range(2,df_pruebas.shape[0]+2):\r\n formula = f'=P{row}-AS{row}-AZ{row}-BY{row}-CA{row}-CD{row}-CF{row}-CH{row}'\r\n worksheet.write_formula(f\"CG{row}\", formula)\r\n\r\n #1957- FLEX BEN APOR \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=P{row}-AS{row}-AZ{row}-BY{row}-CA{row}-CD{row}-CF{row}-CH{row}'\r\n worksheet.write_formula(f\"CI{row}\", formula)\r\n\r\n #1970-APORTE INSTITUCIONAL \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=(P{row}/30*R{row})*0.075-CJ{row}' \r\n worksheet.write_formula(f\"CK{row}\", formula)\r\n\r\n #1971-APORTE VOLUNTARIO PLUS \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=(P{row}/30*R{row})*0.1766-CL{row}' \r\n worksheet.write_formula(f\"CM{row}\", formula)\r\n\r\n #2500-APORTE SALUD EGM \r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=IF(F{row}=\"INTEGRAL\",(AJ{row}+AL{row}+AN{row}+AT{row}+AW{row}+BC{row}+BN{row}+BP{row}+BR{row}+BT{row}+BV{row})*0.7*0.04,(AJ{row}+AL{row}+AN{row}+AT{row}+AW{row}+BC{row}+BN{row}+BP{row}+BR{row}+BT{row}+BV{row})*0.04)-CO{row}' \r\n worksheet.write_formula(f\"CP{row}\", formula)\r\n\r\n #2510-FONDO DE SOLIDARIDAD\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=IF(AJ{row}>4000000,IF(F{row}=\"INTEGRAL\",(AJ{row}+AL{row}+AN{row}+AT{row}+AW{row}+BC{row}+BN{row}+BP{row}+BR{row}+BT{row}+BV{row})*0.7*0.01,(AJ{row}+AL{row}+AN{row}+AT{row}+AW{row}+BC{row}+BN{row}+BP{row}+BR{row}+BT{row}+BV{row})*0.01),0)-CQ{row}'\r\n worksheet.write_formula(f\"CR{row}\", formula)\r\n\r\n #2520-APORTES PENSION IVM\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=IF(F{row}=\"INTEGRAL\",(AJ{row}+AL{row}+AN{row}+AT{row}+AW{row}+BC{row}+BN{row}+BP{row}+BR{row}+BT{row}+BV{row})*0.7*0.04,(AJ{row}+AL{row}+AN{row}+AT{row}+AW{row}+BC{row}+BN{row}+BP{row}+BR{row}+BT{row}+BV{row})*0.04)-CT{row}'\r\n worksheet.write_formula(f\"CU{row}\", formula)\r\n\r\n #2704-LIBRANZA DAVIVIENDA\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=CW{row}-CV{row}'\r\n worksheet.write_formula(f\"CX{row}\", formula)\r\n\r\n #2705-LIBRANZA BANCOLOMBIA\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=CZ{row}-CY{row}'\r\n worksheet.write_formula(f\"DA{row}\", formula)\r\n\r\n #2709-PRESTAMO COMFAMA\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=DC{row}-DB{row}'\r\n worksheet.write_formula(f\"DD{row}\", formula)\r\n\r\n #2710-SERVICIOS COMFAMA\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=DF{row}-DE{row}'\r\n worksheet.write_formula(f\"DG{row}\", formula)\r\n\r\n #2712-DESCUENTO EMI\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=DI{row}-DH{row}'\r\n worksheet.write_formula(f\"DJ{row}\", formula)\r\n\r\n #2713-DESCUENTO PREVER\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=DL{row}-DK{row}'\r\n worksheet.write_formula(f\"DM{row}\", formula)\r\n\r\n #2714-POLIZA DE AUTO\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=DO{row}-DN{row}'\r\n worksheet.write_formula(f\"DP{row}\", formula)\r\n\r\n #2715-POLIZA DE VIDA\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=DR{row}-DQ{row}'\r\n worksheet.write_formula(f\"DS{row}\", formula)\r\n\r\n #2724-DESCUENTO MOVISTAR\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=DU{row}-DT{row}'\r\n worksheet.write_formula(f\"DV{row}\", formula)\r\n\r\n #2737-POLIZA SURA PAC\r\n for row in range(2,df_pruebas.shape[0]+2):\r\n formula = f'=DX{row}-DW{row}'\r\n worksheet.write_formula(f\"DY{row}\", formula)\r\n\r\n #2739-AHORRO 
FEMTI\r\n    for row in range(2,df_pruebas.shape[0]+2):\r\n        formula = f'=EA{row}-DZ{row}'\r\n        worksheet.write_formula(f\"EB{row}\", formula)\r\n\r\n    #3210-APORTE VOLUNTARIO A\r\n    for row in range(2,df_pruebas.shape[0]+2):\r\n        formula = f'=ED{row}-EC{row}'\r\n        worksheet.write_formula(f\"EE{row}\", formula)\r\n\r\n    #3213-APORTE AFC\r\n    for row in range(2,df_pruebas.shape[0]+2):\r\n        formula = f'=EG{row}-EF{row}'\r\n        worksheet.write_formula(f\"EH{row}\", formula)\r\n\r\n    #3222-APORTE VOLUNTARIO PLUS\r\n    for row in range(2,df_pruebas.shape[0]+2):\r\n        formula = f'=CL{row}-EI{row}'\r\n        worksheet.write_formula(f\"EJ{row}\", formula)\r\n\r\n    #3602-DESCUENTO MEDICINA COLSANITAS\r\n    for row in range(2,df_pruebas.shape[0]+2):\r\n        formula = f'=EL{row}-EK{row}'\r\n        worksheet.write_formula(f\"EM{row}\", formula)\r\n\r\n    #3604-DESCUENTO MEDICINA MEDISANITAS\r\n    for row in range(2,df_pruebas.shape[0]+2):\r\n        formula = f'=EO{row}-EN{row}'\r\n        worksheet.write_formula(f\"EP{row}\", formula)\r\n\r\n    #3607-MEDICINA PREPAGADA SURA - GLOBAL\r\n    for row in range(2,df_pruebas.shape[0]+2):\r\n        formula = f'=ER{row}-EQ{row}'\r\n        worksheet.write_formula(f\"ES{row}\", formula)\r\n\r\n    #3608-MEDICINA PREPAGADA SURA - ESPECIAL\r\n    for row in range(2,df_pruebas.shape[0]+2):\r\n        formula = f'=EU{row}-ET{row}'\r\n        worksheet.write_formula(f\"EV{row}\", formula)\r\n\r\n    #3609-MEDICINA PREPAGADA SURA - CLASICA\r\n    for row in range(2,df_pruebas.shape[0]+2):\r\n        formula = f'=EX{row}-EW{row}'\r\n        worksheet.write_formula(f\"EY{row}\", formula)\r\n\r\n    #3611-MEDICINA PREPAGADA COOMEVA\r\n    for row in range(2,df_pruebas.shape[0]+2):\r\n        formula = f'=FA{row}-EZ{row}'\r\n        worksheet.write_formula(f\"FB{row}\", formula)\r\n\r\n    #3900-APORTE VOLUNTARIO INST. \r\n    for row in range(2,df_pruebas.shape[0]+2):\r\n        formula = f'=BY{row}-FC{row}'\r\n        worksheet.write_formula(f\"FD{row}\", formula)\r\n\r\n    #3901-TARJETA ALIMENTACIÓN \r\n    for row in range(2,df_pruebas.shape[0]+2):\r\n        formula = f'=AZ{row}-FE{row}'\r\n        worksheet.write_formula(f\"FF{row}\", formula)\r\n\r\n    #3903-TARJETA GASOLINA RFI\r\n    for row in range(2,df_pruebas.shape[0]+2):\r\n        formula = f'=CA{row}-FG{row}'\r\n        worksheet.write_formula(f\"FH{row}\", formula)\r\n\r\n    #3920-APORTE INSTITUCIONAL\r\n    for row in range(2,df_pruebas.shape[0]+2):\r\n        formula = f'=CJ{row}-FJ{row}'\r\n        worksheet.write_formula(f\"FK{row}\", formula)\r\n    \r\n    writer.save()\r\n    writer.close()\r\n\r\ndef agregar_columna_IBC(mes):\r\n    \r\n    # Restar 1 dia al mes actual, para tomar ibc del mes anterior\r\n    mesInt = int(mes)\r\n    mesIbc = mesInt -1\r\n\r\n    if mesIbc == 0:\r\n        mesIbc = '12'\r\n    else:\r\n        mesIbc = str(mesIbc).zfill(2)\r\n\r\n    # Leer archivo de ibc\r\n    df = pd.read_excel(\".\\\\resultados\\IBC.xls\", sheet_name=\"ACUMU_INFOR_XEMPLO\", header=7)\r\n\r\n    # Eliminar filas y columnas basura\r\n    column_names = df.columns\r\n    columns_to_drop = [index for index, name in enumerate(column_names) if 'Unnamed' in name]\r\n    \r\n    global dataframe\r\n    \r\n    dataframe = df.drop(df.columns[columns_to_drop], axis=1)\r\n    dataframe = dataframe.dropna(how='all')\r\n\r\n\r\n    # Encontrar cual es el mes de interes para tomar el ibc, toma el mes actual - 1\r\n    columnasDf = dataframe.columns\r\n    posicion = None\r\n\r\n    for i, valor in enumerate(columnasDf):\r\n        if valor.find(\"-\"+mesIbc+\"-\") != -1:\r\n            posicion = i\r\n            break\r\n\r\n    if posicion is not None:\r\n        columnaIBC = columnasDf[posicion]\r\n    else:\r\n        columnaIBC = None\r\n    \r\n    global df_ibc\r\n    df_ibc = pd.DataFrame(columns=['CODIGO', 'IBC'])\r\n\r\n    # Recorrer las filas para organizar el dataframe, separar por código e IBC\r\n    
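    # Assumed layout of the ACUMU_INFOR_XEMPLO rows walked below (inferred from
    # the checks in the loop, not taken from a real report): a long CODIGO value
    # ('cedula-dv NOMBRE') opens each employee block, and the concept row '9500'
    # carries that employee's IBC for the month, e.g.:
    #   CODIGO                    2023-01-31
    #   12345678-1 PEREZ JUAN     NaN
    #   9500                      3500000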
valueCodigo = ''\r\n    count = 0\r\n    # recorrer las filas del dataframe original\r\n    for index, row in dataframe.iterrows():\r\n        count = count +1\r\n        # verificar si la longitud de CODIGO es mayor a 4\r\n        if len(row['CODIGO']) > 4:\r\n            nueva_fila = {'CODIGO': row['CODIGO'], 'IBC': '0'}\r\n            valueCodigo = row['CODIGO']\r\n        else:\r\n            # verificar si el valor de CODIGO es igual a '9500'\r\n            if row['CODIGO'] == '9500':\r\n                nueva_fila = {'CODIGO': valueCodigo, 'IBC': row[columnaIBC]}\r\n                nueva_fila_df = pd.DataFrame.from_dict(nueva_fila, orient=\"index\").T\r\n                df_ibc = pd.concat([df_ibc, nueva_fila_df], ignore_index=True)\r\n    # Quitar el digito de verificacion de cc \r\n\r\n    def limpiar_codigo(codigo):\r\n        # Separamos el código por las guiones -\r\n        codigo_sep = codigo.split(\"-\")\r\n        # Si la cantidad de guiones es mayor a 2, solo nos quedamos con el primero y el último\r\n        if len(codigo_sep) > 2:\r\n            codigo_sep = [codigo_sep[0], codigo_sep[-1]]\r\n        # Unimos nuevamente el código con un guion -\r\n        codigo_limpio = \"-\".join(codigo_sep)\r\n        return codigo_limpio\r\n\r\n    # Aplicamos la función a la columna CODIGO\r\n    df_ibc['CODIGO'] = df_ibc['CODIGO'].apply(limpiar_codigo)\r\n    # Dividir la columna 'codigo' en dos utilizando el carácter '-'\r\n    codigo_dividido = df_ibc['CODIGO'].str.split('-', expand=True)\r\n    # Asignar la primera columna al nuevo dataframe 'df_nuevo' como la columna 'CC'\r\n    df_ibc = df_ibc.assign(CC=codigo_dividido[0])\r\n    # Asignar la segunda columna al dataframe 'df_nuevo' como la columna 'NOMBRE'\r\n    df_ibc['NOMBRE'] = codigo_dividido[1]\r\n    # Quitar los espacios en blanco al principio y al final de la columna 'NOMBRE'\r\n    df_ibc['NOMBRE'] = df_ibc['NOMBRE'].str.strip()\r\n    df_ibc = df_ibc[['CC','NOMBRE','IBC']]\r\n    df_ibc['CC'] = df_ibc['CC'].apply(int)\r\n    return df_ibc\r\n\r\n    \r\ndef calcular_0740():\r\n    \r\n    def calculo_fila(fila):\r\n        salario_total = fila[\"SALARIO BASICO \"]\r\n        dias_incapacidad = fila[\"DIAS INCAPACIDAD\"]\r\n        if dias_incapacidad > 2:\r\n            result = salario_total / 30 * 2\r\n        else:\r\n            result = salario_total / 30 * dias_incapacidad\r\n        return result\r\n    \r\n    df_Novedades_Nomina = leer_NovedadesNomina()\r\n    df_RevPrel = leer_RevPreliminar()\r\n    \r\n    global resultados,df_filtrado\r\n    df_filtrado = df_RevPrel.loc[abs(df_RevPrel['REVISIÓN 0740']) > 1000.0]\r\n    resultados = df_filtrado.apply(calculo_fila, axis=1)\r\n    df_filtrado[\"0740-AJUSTADO\"] = resultados\r\n\r\n    df_RevPrelxlsx = leer_RevPreliminarxlsx()\r\n    cc_en_df4 = set(df_filtrado['CC'])\r\n    # Ahora, para cada cc en dfrevpreli que esté en df4, reemplazaremos el salario en dfrevpreli con el de df4\r\n    for i, row in df_RevPrelxlsx.iterrows():\r\n        if row['CC'] in cc_en_df4:\r\n            salario_actual = df_filtrado.loc[df_filtrado['CC'] == row['CC'], '0740-AJUSTADO'].iloc[0]\r\n            df_RevPrelxlsx.at[i, '0740-AUXILIO EMPRESA '] = salario_actual\r\n    \r\n    #--------------\r\n\r\n    writer = pd.ExcelWriter('.\\\\resultados\\\\Revision prueba-2DEMO.xlsx', engine=\"xlsxwriter\")\r\n    df_RevPrelxlsx.to_excel(writer, sheet_name='Hoja1', index=False)\r\n    workbook = writer.book\r\n    worksheet = writer.sheets['Hoja1']\r\n    agregar_formulas(df_RevPrelxlsx,worksheet,writer)\r\n    #--------------\r\n    #writer = pd.ExcelWriter('output.xlsx')\r\n    \r\n    # Write the DataFrame to the Excel file\r\n    #df_RevPrelxlsx.to_excel(writer)\r\n\r\n    # Save the Excel file\r\n    #writer.save()\r\n    \r\n\r\n\r\n\r\n\r\n#----Interfaz-----------------------------------------------------------------#\r\ndef 
interfaz():\r\n global fecha\r\n #global ruta_guardar\r\n #sg.theme(\"DarkBlue3\")\r\n sg.set_options(font=(\"Microsoft JhengHei\", 13),background_color=('#ffffff'))\r\n\r\n # Add your new theme colors and settings\r\n sg.LOOK_AND_FEEL_TABLE['MyCreatedTheme'] = {'BACKGROUND': '#ffffff',\r\n 'TEXT': '#929292',\r\n 'INPUT': '#ffffff',\r\n 'TEXT_INPUT': '#929292',\r\n 'SCROLL': '#99CC99',\r\n 'BUTTON': ('#fff', '#4C9C2E'),\r\n 'PROGRESS': ('#D1826B', '#CC8019'),\r\n 'BORDER': 1, 'SLIDER_DEPTH': 0, \r\n 'PROGRESS_DEPTH': 0, }\r\n\r\n # Switch to use your newly created theme\r\n sg.theme('MyCreatedTheme')\r\n\r\n layout = [\r\n\r\n \r\n [ \r\n sg.Image('./ICONO M.png', size=(64,90),background_color=('#ffffff')),\r\n #sg.Text(\"Revisión preliminar de Nómina\",background_color='#fff',text_color='#4C9C2E',font=('bold',37)),#\\n\\nVoy a ayudarte a crear el archivo para la revisión preliminar de la nómina. Para esto necesito que me indiques una fecha y\\nque se adjunten los archivos que se solicitan a continuación (los archivos deben ser con corte al mes que se quiere revisar):\"),\r\n sg.Text(\"Revisión preliminar de Nómina\",text_color='#929292',background_color='#fff',font=('bold',37)),#\\n\\nVoy a ayudarte a crear el archivo para la revisión preliminar de la nómina. Para esto necesito que me indiques una fecha y\\nque se adjunten los archivos que se solicitan a continuación (los archivos deben ser con corte al mes que se quiere revisar):\"),\r\n\r\n ],\r\n\r\n [\r\n sg.Text(\"Voy a ayudarte a crear el archivo para la revisión preliminar de la nómina. Para esto se requiere que cargue la información\\nque se indica a continuación. Los resultados serán consolidados en un excel que se guardará en la ruta '/MVM Ingenieria\\nde Software/Unidad de Sostenibilidad y Crecimiento - Balanc//Control Interno/Nomina//REVISIONES/',\\ncon el nombre 'Revisión preliminar de nómina'.\",\r\n background_color='#fff',text_color='#929292',font=(\"Microsoft JhengHei\",13)),\r\n ],\r\n [\r\n sg.Text(\"Ingreso de información\",text_color=\"#4C9C2E\",font=('bold',20),background_color='#fff'),\r\n\r\n ],\r\n\r\n [\r\n sg.Text(\" • Indique útlimo día del mes de corte\",size=34,background_color='#fff',text_color='#000000'),\r\n sg.Input(key=\"-FECHA-\",size=45,font=12,expand_x=True),\r\n sg.CalendarButton(\"Calendario\",size=9,button_color='#4C9C2E',border_width='1',\r\n close_when_date_chosen=True,location=(900,100),no_titlebar=False,title=\"Fecha\")\r\n\r\n ],\r\n [\r\n sg.Text(\" • Nómina horizontal detallada\",size=(34),background_color='#fff',text_color='#000000'),\r\n sg.Input(key='-INPUT_NOMHORI-', enable_events=True,font=(12),size=45,expand_x=True),\r\n sg.FileBrowse(\"Buscar\",target=\"-INPUT_NOMHORI-\",size=9,file_types=((\"Archivos Excel\", \"*.xlsx\"), (\"Archivos Excel\", \"*.xls\"),(\"Archivos Excel\", \"*.xlsm\")),button_color='#4C9C2E'),\r\n\r\n ],\r\n [\r\n sg.Text(\" • 11. Maestro\",size=(34),background_color='#fff',text_color='#000000'),\r\n sg.Input(key='-INPUT_MAESTRO-', enable_events=True,font=(12),size=45,expand_x=True),\r\n sg.FileBrowse(\"Buscar\",target=\"-INPUT_MAESTRO-\",size=9,file_types=((\"Archivos Excel\", \"*.xlsx\"), (\"Archivos Excel\", \"*.xls\"),(\"Archivos Excel\", \"*.xlsm\")),button_color='#4C9C2E'),\r\n\r\n ],\r\n\r\n\r\n [\r\n sg.Text(\" • 13. 
Facturación nómina\",size=(34),background_color='#fff',text_color='#000000'),\r\n sg.Input(key='-INPUT_FACTURACION-', enable_events=True,font=(12),size=45,expand_x=True),\r\n sg.FileBrowse(\"Buscar\",target=\"-INPUT_FACTURACION-\",size=9,file_types=((\"Archivos Excel\", \"*.xlsx\"), (\"Archivos Excel\", \"*.xls\"),(\"Archivos Excel\", \"*.xlsm\")),button_color='#4C9C2E'),\r\n\r\n ],\r\n [\r\n sg.Text(\" • 14. Novedades nómina\",size=(34),background_color='#fff',text_color='#000000'),\r\n sg.Input(key='-INPUT_NOVEDADES-', enable_events=True,font=(12),size=45,expand_x=True),\r\n sg.FileBrowse(\"Buscar\",target=\"-INPUT_NOVEDADES-\",size=9,file_types=((\"Archivos Excel\", \"*.xlsx\"), (\"Archivos Excel\", \"*.xls\"),(\"Archivos Excel\", \"*.xlsm\")),button_color='#4C9C2E'),\r\n\r\n ], \r\n [\r\n sg.Text(\" • '16. Revision Nomina' del mes anterior\",size=(34),background_color='#fff',text_color='#000000'),\r\n sg.Input(key='-INPUT_ANTERIOR-', enable_events=True,font=(12),size=45,expand_x=True),\r\n sg.FileBrowse(\"Buscar\",target=\"-INPUT_ANTERIOR-\",size=9,file_types=((\"Archivos Excel\", \"*.xlsx\"), (\"Archivos Excel\", \"*.xls\"),(\"Archivos Excel\", \"*.xlsm\")),button_color='#4C9C2E'),\r\n\r\n ], \r\n \r\n [\r\n sg.Text(\"Ejecución\",text_color=\"#4C9C2E\",font=('bold',20),background_color='#fff'),\r\n\r\n ],\r\n\r\n [\r\n sg.Text(\"Después de agregar toda la información solicitada, presione 'Procesar y guardar' →\",\r\n size=65,background_color='#fff',text_color='#929292',font=(\"Microsoft JhengHei\",13)),\r\n sg.Button(\"Procesar y guardar\",size=18,button_color='#4C9C2E',border_width='2'), \r\n sg.CButton(\"Cerrar\",size=9,button_color='#FA4949',border_width='1')\r\n\r\n ], \r\n\r\n ]\r\n\r\n\r\n\r\n window = sg.Window('MERCURY - REVISIÓN PRELIMINAR DE NÓMINA', layout)\r\n\r\n\r\n df = pd.DataFrame()\r\n\r\n while True:\r\n\r\n event, values = window.read(timeout=100)\r\n if event == sg.WINDOW_CLOSED:\r\n break\r\n elif event is None:\r\n break\r\n elif event == 'Procesar y guardar':\r\n\r\n global ruta_nomina_horizontal\r\n fecha = values['-FECHA-'][:10] \r\n ruta_nomina_horizontal = values['-INPUT_NOMHORI-']\r\n ruta_maestro = values['-INPUT_MAESTRO-']\r\n ruta_revision_mesAnterior=values[\"-INPUT_ANTERIOR-\"]\r\n ruta_novedades_nomina=values[\"-INPUT_NOVEDADES-\"]\r\n ruta_facturacion_nomina=values[\"-INPUT_FACTURACION-\"]\r\n ruta_archivo_base=\".\\\\archivos\\\\archivo base de revision.xlsx\"\r\n\r\n try:\r\n df_new=calculo_conceptos(ruta_novedades_nomina,ruta_facturacion_nomina,\r\n compila_archivo(\r\n ruta_nomina_horizontal,ruta_maestro,ruta_revision_mesAnterior,ruta_novedades_nomina,ruta_archivo_base\r\n )\r\n )\r\n \r\n \r\n \r\n \r\n df=pd.concat([df,df_new])\r\n \r\n \r\n\r\n sg.popup(\"¡El proceso ha finalizado con exito!\",\"Se ha guarado una copia del archivo en la ruta:\\n'/MVM Ingenieria de Software/Unidad de Sostenibilidad y Crecimiento - Balanc/\"+fecha[:4]+'/Control Interno/Nomina\\n/'+ fecha[:10].split(\"-\")[1]+\"/REVISIONES/'.\\nPor favor cierre la ventana.\",title=\"Resultado\",text_color=\"#4C9C2E\",font='bold',background_color='#fff')\r\n \r\n except Exception as e:\r\n\r\n sg.popup_error(f'Ha ocurrido el siguiente error: ' , e, 'Por favor verifique que haya cargado todos los archivos con el formato adecuado.',title=\"Resultado\",background_color='#fff',text_color='#FF0505')\r\n print(\"Error: \", e)\r\n\r\n\r\n window.close()\r\n \r\nif __name__==\"__main__\":\r\n 
interfaz()","repo_name":"luisfbcudj94/AjustesMercury","sub_path":"1_ReviPreliNomi.py","file_name":"1_ReviPreliNomi.py","file_ext":"py","file_size_in_byte":63037,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"28043181360","text":"from __future__ import annotations\n\nfrom typing import Any\n\nfrom mknodes.basenodes import mktext\nfrom mknodes.info import license\nfrom mknodes.utils import log\n\n\nlogger = log.get_logger(__name__)\n\n\nclass MkLicense(mktext.MkText):\n \"\"\"Node to show a license.\n\n If not explicitely set, the license will be pulled from the project.\n\n \"\"\"\n\n ICON = \"material/license\"\n STATUS = \"new\"\n\n def __init__(\n self,\n license_type: str | None = None,\n **kwargs: Any,\n ):\n \"\"\"Constructor.\n\n Arguments:\n license_type: License to show (identifier from https://spdx.org/licenses/)\n If none is set, it will try to get license from Project\n kwargs: Keyword arguments passed to parent\n \"\"\"\n super().__init__(**kwargs)\n self.license_type = license_type\n\n @property\n def text(self):\n if self.license_type:\n lic = license.License.from_name(self.license_type)\n if self.ctx.metadata.distribution_name:\n lic.resolve_by_distribution(self.ctx.metadata.distribution_name)\n return lic.content\n return self.ctx.metadata.license_text or \"\"\n\n\nif __name__ == \"__main__\":\n lic = MkLicense.with_context(\"GPL-3.0\")\n print(repr(lic))\n","repo_name":"phil65/mknodes","sub_path":"mknodes/templatenodes/mklicense/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"6645990615","text":"#!usr/bin/python\n#encoding:utf-8\n\n__author__ = 'zhangbo'\n\nfrom selenium import webdriver\nfrom time import *\nimport unittest\nimport random\nimport datetime\nfrom db_files.userInformation_db_query import userInformation_db_query_wealth_point_success\nfrom db_files.userInformation_db_wealth_data_update import userInformation_db_wealth_data_update_success\n\n\n#----------------------------------------------------------------------------------------------------------------------#\n # 体验学员取消约课成功\n#----------------------------------------------------------------------------------------------------------------------#\n\ndef user_experience_cancels_cadets_success(driver,user_id,user_mobile):\n\n #浏览器滚动条置底\n js=\"var q=document.documentElement.scrollTop=550\"\n driver.execute_script(js)\n\n sleep(2)\n\n #左侧和更多已预约课程开关\n reserved_courses_on = False\n\n #一对一课表信息开关\n one_to_one_table_information_on = False\n\n #确定取消开关\n cancel_course_on = False\n\n reserved_courses_index = random.randint(0,2)\n\n if reserved_courses_index == 0:\n\n try:\n\n #会员中心:取消课程(3个按钮)\n cancel_course_button_text = driver.find_element_by_xpath(\"//*[@id='orderClass']/div/div[2]/ul/li[3]/a\").text\n\n if cancel_course_button_text == \"取消课程\":\n\n driver.find_element_by_xpath(\"//*[@id='orderClass']/div/div[2]/ul/li[3]/a\").click()\n\n sleep(2)\n\n cancel_course_on = True\n\n except:\n\n #会员中心:取消课程(3个按钮)\n # try:\n #\n # driver.find_element_by_xpath(\"//*[@id='orderClass']/div/div[2]/ul/li[3]/a\").click()\n #\n # sleep(2)\n #\n # cancel_course_on = True\n #\n # except:\n\n print (\"没有找到取消课程按钮,无法取消,请从其它途径取消体验课!!!\")\n\n sleep(2)\n\n elif reserved_courses_index == 1 or reserved_courses_index == 2:\n\n if reserved_courses_index == 1:\n\n #左侧区域已预约课程按钮\n try:\n 
driver.find_element_by_xpath(\"//*[@id='sidebar']/div/ul[1]/li[3]/a\")\n\n sleep(1)\n\n driver.find_element_by_xpath(\"//*[@id='sidebar']/div/ul[1]/li[3]/a\").click()\n\n reserved_courses_on = True\n\n except:\n\n print (\"没有找到已预约课程按钮,无法取消,请从其它途径取消体验课!!!\")\n\n elif reserved_courses_index == 2:\n\n #我预约的体验课--右侧“更多”按钮\n try:\n\n driver.find_element_by_xpath(\"//*[@id='orderClass']/div/h3/a\")\n\n sleep(1)\n\n driver.find_element_by_xpath(\"//*[@id='orderClass']/div/h3/a\").click()\n\n reserved_courses_on = True\n\n except:\n\n print (\"没有找到更多按钮,无法取消,请从其它途径取消体验课!!!\")\n\n if reserved_courses_on == True:\n\n #课表页信息\n try:\n #取消课程按钮\n driver.find_element_by_xpath(\"//*[@id='container']/div/div/div[2]/div[1]/div[2]/div[2]/div/ul/li[4]/a\")\n\n sleep(1)\n\n driver.find_element_by_xpath(\"//*[@id='container']/div/div/div[2]/div[1]/div[2]/div[2]/div/ul/li[4]/a\").click()\n\n sleep(1)\n\n one_to_one_table_information_on = True\n\n #确定弹框\n if one_to_one_table_information_on == True:\n\n try:\n #确定弹框确定按钮\n driver.find_element_by_xpath(\"//*[@id='m-confirm']/div/div[3]/div/span[1]\")\n\n sleep(1)\n\n driver.find_element_by_xpath(\"//*[@id='m-confirm']/div/div[3]/div/span[1]\").click()\n\n sleep(1)\n\n cancel_course_on = True\n\n except:\n\n print (\"没有找到弹框!!!\")\n\n except:\n\n print (\"当前未预约体验课 或者 当前体验课无法取消,请从其它途径取消体验课!!!\")\n sleep(2)\n\n if cancel_course_on == True:\n\n try:\n\n # 获取当天现在时间\n db_wealth_update_time = strftime('%Y-%m-%d %H:%M:%S', localtime(time()))\n\n sleep(1)\n\n #取消课程页:确定取消\n driver.find_element_by_xpath(\"//*[@id='cancelSubmit']\").click()\n\n sleep(4)\n\n try:\n\n #当天限制只能最多取消20次弹框\n driver.switch_to_alert()\n\n sleep(1)\n\n print (\"当天取消次数过多,无法完成操作,返回会员中心啦!!!\")\n\n except:\n\n #获取用户user_wealth表point\n db_wealth_data = userInformation_db_query_wealth_point_success(user_mobile)\n sleep(1)\n\n db_wealth_point = int(db_wealth_data[1]) + 1\n sleep(1)\n\n db_wealth_point = str(db_wealth_point)\n\n #更新user_wealth表point\n userInformation_db_wealth_data_update_success(user_mobile,db_wealth_point,db_wealth_data[0],db_wealth_update_time)\n\n # -----------------------------------#\n\n print (\"取消约课后--财富数据更新成功!!!\")\n\n # -----------------------------------#\n\n sleep(2)\n\n #取消课程后返回约课页面第一个时间段\n try:\n\n driver.find_element_by_xpath(\"//*[@id='classTime']/div/ul[2]/li[1]/span\")\n\n print (\"课程取消成功,返回到预约体验课页面!!!\")\n\n except:\n\n print (\"返回的预约体验课页面有错误信息,请查看原因!!!\")\n\n except:\n\n print (\"没有找到确定取消按钮,无法取消体验课,请查看原因!!!\")\n\n sleep(2)","repo_name":"zhangbo666/webAutomationTest","sub_path":"test51talk_02/talkUser/user_experience_class/user_experience_cancels_cadets.py","file_name":"user_experience_cancels_cadets.py","file_ext":"py","file_size_in_byte":7626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"36358229482","text":"#!/usr/bin/python\n\nimport argparse\nimport serial\nimport time\n\ntry:\n import meterbus\nexcept ImportError:\n import sys\n sys.path.append('../../')\n import meterbus\n\n\ndef ping_address(address, retries=5):\n for i in range(0, retries + 1):\n meterbus.send_ping_frame(ser, address)\n try:\n frame = meterbus.load(meterbus.recv_frame(ser, 1))\n if isinstance(frame, meterbus.TelegramACK):\n return True\n except meterbus.MBusFrameDecodeError:\n pass\n\n return False\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Scan serial M-Bus for devices.')\n parser.add_argument('-d', action='store_true',\n help='Enable verbose debug')\n parser.add_argument('-b', '--baudrate',\n 
type=int, default=2400,\n help='Serial bus baudrate')\n parser.add_argument('-r', '--retries',\n type=int, default=5,\n help='Number of ping retries for each address')\n parser.add_argument('device', type=str, help='Serial device or URI')\n\n args = parser.parse_args()\n\n meterbus.debug(args.d)\n\n try:\n with serial.serial_for_url(args.device,\n args.baudrate, 8, 'E', 1, timeout=1) as ser:\n for address in range(0, meterbus.MAX_PRIMARY_SLAVES + 1):\n if ping_address(address, args.retries):\n print(\n \"Found a M-Bus device at address {0}\".format(address)\n )\n except serial.serialutil.SerialException as e:\n print(e)\n","repo_name":"ganehag/pyMeterBus","sub_path":"meterbus/tools/mbus-serial-scan.py","file_name":"mbus-serial-scan.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"50"} +{"seq_id":"33780262718","text":"import torch\r\nimport torch.nn as nn\r\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\r\n\r\nMODULE_DICT = {\r\n \"rnn\": nn.RNN,\r\n \"lstm\": nn.LSTM,\r\n \"gru\": nn.GRU,\r\n}\r\n\r\nclass DocumentLevelRNNEncoder(nn.Module):\r\n def __init__(self, model_type=\"gru\", in_dim=768, hidden_dim=768, out_dim=768, num_layers=1, bidirectional=True):\r\n \"\"\"\r\n an RNN implementation of document-level global paragraph encoder\r\n \"\"\"\r\n super().__init__()\r\n self.document_lstm = MODULE_DICT[model_type](\r\n input_size=in_dim, hidden_size=hidden_dim, num_layers=num_layers, batch_first=True, bidirectional=bidirectional,\r\n # proj_size=out_dim,\r\n )\r\n # raise ValueError(\"proj_size argument is only supported for LSTM, not RNN or GRU\")\r\n self.hidden_dim = 2 * hidden_dim if bidirectional else hidden_dim\r\n print(\"initialized DocumentLevelRNNEncoder\")\r\n\r\n def forward(self, inputs, num_nodes):\r\n \"\"\"\r\n\r\n :param inputs: (batch_size, max_num_nodes, in_dim)\r\n :param num_nodes: (batch_size), oringinal number of nodes in each document in the batch\r\n :return:\r\n \"\"\"\r\n if type(num_nodes) is torch.Tensor:\r\n num_nodes = num_nodes.to(\"cpu\")\r\n # num_nodes = torch.tensor(num_nodes, dtype=torch.int64)\r\n num_nodes = num_nodes.to(torch.int64)\r\n packed_inputs = pack_padded_sequence(inputs, num_nodes, batch_first=True, enforce_sorted=False)\r\n # output, _ = self.document_lstm(inputs) # output: (batch_size, num_token, out_dim*bidirectional)\r\n output, _ = self.document_lstm(packed_inputs)\r\n '''\r\n Outputs: output, (h_n, c_n)\r\n output: (batch_size, num_token, out_dim*bidirectional) when batch_first=True, output features (h_t) from the last layer of the LSTM, for each t\r\n h_n: (bidirectional*num_layers, batch_size, hidden_dim), the final hidden state for each element in the sequence\r\n When bidirectional=True, h_n will contain a concatenation of the final forward and reverse hidden states, respectively\r\n c_n is the same as h_n except that it is the final cell state for each element in the sequence\r\n '''\r\n output = pad_packed_sequence(output, batch_first=True) # (batch_size, max_num_nodes, 2*hidden_dim)\r\n # print(output)\r\n return output[0]","repo_name":"qroam/web-document-discourse-parsing","sub_path":"module/document_encoder/global_encoder.py","file_name":"global_encoder.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"37390764578","text":"import pandas as pd\nimport pytest\n\nfrom ixmp4 import Model\n\nfrom ..utils 
import all_platforms, assert_unordered_equality, create_filter_test_data\n\n\n@all_platforms\nclass TestDataModel:\n def test_create_model(self, test_mp):\n model = test_mp.backend.models.create(\"Model\")\n assert model.name == \"Model\"\n assert model.created_at is not None\n assert model.created_by == \"@unknown\"\n\n def test_model_unique(self, test_mp):\n test_mp.backend.models.create(\"Model\")\n\n with pytest.raises(Model.NotUnique):\n test_mp.models.create(\"Model\")\n\n def test_get_model(self, test_mp):\n model1 = test_mp.backend.models.create(\"Model\")\n model2 = test_mp.backend.models.get(\"Model\")\n assert model1 == model2\n\n def test_model_not_found(self, test_mp):\n with pytest.raises(Model.NotFound):\n test_mp.models.get(\"Model\")\n\n def test_list_model(self, test_mp):\n test_mp.Run(\"Model 1\", \"Scenario\", version=\"new\")\n test_mp.Run(\"Model 2\", \"Scenario\", version=\"new\")\n\n models = sorted(test_mp.backend.models.list(), key=lambda x: x.id)\n assert models[0].id == 1\n assert models[0].name == \"Model 1\"\n assert models[1].id == 2\n assert models[1].name == \"Model 2\"\n\n def test_tabulate_model(self, test_mp):\n test_mp.Run(\"Model 1\", \"Scenario\", version=\"new\")\n test_mp.Run(\"Model 2\", \"Scenario\", version=\"new\")\n\n true_models = pd.DataFrame(\n [\n [1, \"Model 1\"],\n [2, \"Model 2\"],\n ],\n columns=[\"id\", \"name\"],\n )\n\n models = test_mp.backend.models.tabulate()\n assert_unordered_equality(\n models.drop(columns=[\"created_at\", \"created_by\"]), true_models\n )\n\n def test_filter_model(self, test_mp):\n run1, _ = create_filter_test_data(test_mp)\n\n res = test_mp.backend.models.tabulate(name__like=\"Model %\")\n assert sorted(res[\"name\"].tolist()) == [\"Model 1\", \"Model 2\"]\n\n res = test_mp.backend.models.tabulate(\n iamc={\n \"region\": {\"name\": \"Region 1\"},\n \"run\": {\"default_only\": False},\n }\n )\n assert sorted(res[\"name\"].tolist()) == [\"Model 1\"]\n\n res = test_mp.backend.models.tabulate(\n iamc={\n \"region\": {\"name\": \"Region 3\"},\n \"run\": {\"default_only\": False},\n }\n )\n assert sorted(res[\"name\"].tolist()) == [\"Model 1\", \"Model 2\"]\n\n run1.set_as_default()\n res = test_mp.backend.models.tabulate(\n iamc={\n \"variable\": {\"name\": \"Variable 1\"},\n \"unit\": {\"name__in\": [\"Unit 3\", \"Unit 4\"]},\n \"run\": {\"default_only\": True},\n }\n )\n assert res[\"name\"].tolist() == [\"Model 2\"]\n\n res = test_mp.backend.models.tabulate(\n iamc={\n \"run\": {\"default_only\": False, \"scenario\": {\"name\": \"Scenario 2\"}},\n }\n )\n\n assert sorted(res[\"name\"].tolist()) == [\"Model 2\"]\n","repo_name":"iiasa/ixmp4","sub_path":"tests/data/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"50"} +{"seq_id":"72096871195","text":"\"\"\"\nname : bar_plot\npurpose: create bar plots using a number of user defined parameters\nname : Kostas Mammas \n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nclass Bar_Plot:\n\n def __init__(self):\n self.dataset_object = None\n self.xaxis = None\n self.yaxis = None\n self.xaxisname = None\n self.yaxisname = None\n self.title = None\n self.color = None\n self.group = None\n self.background_color = False\n self.rotate_xaxis = None\n self.text_bars = False\n self.save_path = None\n self.output_name = None\n\n def gen_bar_plot(self):\n\n # Generate bar plot\n plt.figure()\n p = 
sns.barplot(x=self.dataset_object[self.xaxis],\n y=self.dataset_object[self.yaxis],\n color = self.color)\n # Define names of axes\n p.set(xlabel= self.xaxisname,\n ylabel= self.yaxisname\n )\n # Set title\n p.set_title(self.title)\n\n # Rotate x axis labels\n if (self.rotate_xaxis is not None):\n p.set_xticklabels(self.dataset_object[self.xaxis],\n rotation = self.rotate_xaxis)\n\n # Add text labels on bars\n if (self.text_bars == True):\n for l in p.patches:\n spend = l.get_height()\n p.text(l.get_x() + l.get_width() / 2., spend + 3, '{:1.0f}'.format(spend), ha=\"center\")\n\n # Save plot\n if (self.save_path is not None):\n p.figure.savefig(self.save_path + self.output_name)\n\n return p\n\n def gen_stacked_bar_plot(self):\n\n # Generate bar plot\n plt.figure()\n p = sns.barplot(x=self.dataset_object[self.xaxis],\n y=self.dataset_object[self.yaxis],\n hue=self.dataset_object[self.group])\n\n # Define names of axes\n p.set(xlabel= self.xaxisname,\n ylabel= self.yaxisname\n )\n # Set title\n p.set_title(self.title)\n\n # Rotate x axis labels\n if (self.rotate_xaxis is not None):\n p.set_xticklabels(self.dataset_object[self.xaxis],\n rotation = self.rotate_xaxis)\n\n # Save plot\n if (self.save_path is not None):\n p.figure.savefig(self.save_path + self.output_name)\n\n return p\n\n\n","repo_name":"mammask/Classification","sub_path":"Classification/src/bar_plot.py","file_name":"bar_plot.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"72900296795","text":"n = int(input())\ndef hanoi(disk, start, mid, end):\n if disk == 1: # only one disk: just move it\n print(start, end)\n else:\n hanoi(disk - 1, start, end, mid) # move the rest to the spare peg, then\n print(start, end) # print how this disk was moved\n hanoi(disk - 1, mid, start, end) # move them onto the destination peg\nprint(2**n-1) # total number of moves\nhanoi(n, 1, 2, 3)","repo_name":"lifrary/Algorithm","sub_path":"Baekjoon/11729.py","file_name":"11729.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"17154446028","text":"#!/usr/bin/env python3\n\nimport requests\nimport os\n\nurl = \"http://localhost/upload/\"\nuser = os.getenv('USER')\npath = '/home/{}/supplier-data/images/'.format(user)\n\nfor file in os.listdir(path):\n if 'jpeg' in file:\n full_path = path + file\n with open(full_path, 'rb') as op:\n req = requests.post(url, files={'file': op})","repo_name":"sabrinatoch/google-it-cert-projects","sub_path":"fruit-catalog/supplier_image_upload.py","file_name":"supplier_image_upload.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"14351183465","text":"import os\nimport pathlib\n\nfrom dotenv import dotenv_values\n\n\nclass Config:\n prepared_csv_path: str\n port: int\n host: str\n num_workers: int\n\n def __init__(self, dotenv_path: str | os.PathLike) -> None:\n env_dict = dotenv_values(dotenv_path)\n assert env_dict is not None, f\"Please create a file `{dotenv_path}` .\"\n\n def get_or_die(key: str) -> str:\n value = env_dict.get(key)\n if value is None:\n raise ValueError(f\"No such environment variable `{key}`. 
Check your `.env` file.\")\n return value\n\n self.prepared_csv_path = get_or_die(\"PREPARED_CSV_PATH\")\n self.port = int(env_dict.get(\"PORT\") or \"8080\")\n self.host = env_dict.get(\"HOST\") or \"0.0.0.0\"\n self.num_workers = int(env_dict.get(\"NUM_WORKERS\") or \"1\")\n\n\nPROJECT_ROOT_DIR = pathlib.Path(__file__).parent.parent\n\ncfg = Config(PROJECT_ROOT_DIR / \".env\")\n","repo_name":"ohkilab/SU-expA-final-references","sub_path":"python/app/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"20193099618","text":"from flask_restful import Resource, reqparse\nfrom flask_jwt import jwt_required\n\nfrom datetime import datetime\n\nfrom exceptions.ValidationException import ValidationException\nfrom models.CategoryModel import CategoryModel\nfrom models.TransactionModel import TransactionModel\n\nTRANSACTION_NOT_FOUND = {'message': 'Transaction not found'}\nTRANSACTION_NOT_UPDATED = {'message': 'An error occurred updating the transaction'}\nTRANSACTION_NOT_CREATED = {'message': 'An error occurred creating the transaction'}\n\n\ndef validate_category_reference(t):\n if t.categoryId:\n cat = CategoryModel.find_by_id(t.categoryId)\n if not cat:\n raise ValidationException('category ID specified does not exist.')\n\n\nclass Transaction(Resource):\n TABLE_NAME = 'transaction'\n parser = reqparse.RequestParser()\n parser.add_argument('categoryId',\n type=int,\n required=False,\n help='The category to which this transaction belongs')\n parser.add_argument('transactionDate',\n type=lambda x: datetime.strptime(x, '%Y-%m-%dT%H:%M:%S'),\n required=True,\n help='When this transaction occurred')\n parser.add_argument('description',\n type=str,\n required=False,\n help='A description of this transaction',\n )\n parser.add_argument('amount',\n type=lambda x: \"{0:.2f}\".format(float(x)),\n required=True,\n help='Amount of this transaction')\n\n queryParser = reqparse.RequestParser()\n queryParser.add_argument('startDate',\n type=lambda x: datetime.strptime(x, '%Y-%m-%d'),\n location='args',\n required=False,\n help='The earliest date allowed')\n queryParser.add_argument('endDate',\n type=lambda x: datetime.strptime(x, '%Y-%m-%d'),\n location='args',\n required=False,\n help='The latest date allowed')\n queryParser.add_argument('categoryName',\n type=str,\n location='args',\n required=False,\n help='The category name to filter by')\n\n def get(self, transaction_id):\n cat = TransactionModel.find_by_id(transaction_id)\n if not cat:\n return TRANSACTION_NOT_FOUND, 404\n else:\n return cat.json()\n\n @jwt_required()\n def put(self, transaction_id):\n data = Transaction.parser.parse_args()\n\n t = TransactionModel.find_by_id(transaction_id)\n if not t:\n return TRANSACTION_NOT_FOUND, 404\n\n validate_category_reference(t)\n\n transaction = TransactionModel(data)\n transaction.id = transaction_id\n\n try:\n transaction.update()\n except Exception as e:\n print(e)\n return TRANSACTION_NOT_UPDATED, 500\n\n return transaction.json()\n\n @jwt_required()\n def delete(self, transaction_id):\n t = TransactionModel.find_by_id(transaction_id)\n if not t:\n return TRANSACTION_NOT_FOUND, 404\n t.delete_from_db()\n\n return {'message': 'success'}, 200\n\n\nclass TransactionList(Resource):\n def get(self):\n params = Transaction.queryParser.parse_args()\n resp = {'transactions': list(map(lambda t: t.json(), TransactionModel.find_by_params(params)))}\n if not resp['transactions']:\n 
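# An empty result set is reported as 404 (not found) rather than as an empty list with a 200 status.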
return TRANSACTION_NOT_FOUND, 404\n return resp\n\n @jwt_required()\n def post(self):\n data = Transaction.parser.parse_args()\n\n t = TransactionModel(data)\n\n validate_category_reference(t)\n\n try:\n t.save_to_db()\n except Exception as err:\n print(err)\n return TRANSACTION_NOT_CREATED, 500\n\n return t.json(), 201\n","repo_name":"sciurolocutus/finance","sub_path":"resources/Transaction.py","file_name":"Transaction.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"20464100940","text":"#++++++++++++++++++++++ IMPORT MODULES AND FUNCTIONS +++++++++++++++++++++++++++++++++++++++++++++\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tkinter import filedialog\nfrom tkinter import Tk\n\nimport sys\nfrom argparse import ArgumentParser\nimport tensorflow as tf\nfrom tensorflow import keras\n\nsys.path.insert(0, './lib')\n# from m_open_extension import *\n# from m_fft import *\n# from m_demodulation import *\n# from m_denois import *\n# from m_det_features import *\n# from m_processing import *\n# from decimal import Decimal\nInputs = ['mode']\nInputsOpt_Defaults = {'hola':'chao'}\n\ndef main(argv):\n\tprint('caca')\n\t# tf.logging.set_verbosity(tf.logging.ERROR)\n\t\n\tmodel = keras.Sequential()\n\t# Adds a densely-connected layer with 64 units to the model:\n\t# model.add(keras.layers.Dense(64, activation='relu', input_shape=(1000,32)))\n\tmodel.add(keras.layers.Dense(64, activation='relu'))\n\n\t# Add another:\n\tmodel.add(keras.layers.Dense(64, activation='relu'))\n\t# Add a softmax layer with 10 output units:\n\tmodel.add(keras.layers.Dense(10, activation='softmax'))\n\t\n\t\n\t\n\t# # Create a sigmoid layer:\n\t# layers.Dense(64, activation='sigmoid')\n\t# # Or:\n\t# layers.Dense(64, activation=tf.sigmoid)\n\n\t# # A linear layer with L1 regularization of factor 0.01 applied to the kernel matrix:\n\t# layers.Dense(64, kernel_regularizer=keras.regularizers.l1(0.01))\n\t# # A linear layer with L2 regularization of factor 0.01 applied to the bias vector:\n\t# layers.Dense(64, bias_regularizer=keras.regularizers.l2(0.01))\n\n\t# # A linear layer with a kernel initialized to a random orthogonal matrix:\n\t# layers.Dense(64, kernel_initializer='orthogonal')\n\t# # A linear layer with a bias vector initialized to 2.0s:\n\t# layers.Dense(64, bias_initializer=keras.initializers.constant(2.0))\n\n\t\n\tmodel.compile(optimizer=tf.train.AdamOptimizer(0.001), loss='categorical_crossentropy', metrics=['accuracy'])\n\t\n\t\n\tdata = np.random.random((1000, 32))\n\tprint(data)\n\tlabels = np.random.random((1000, 10))\n\n\tmodel.fit(data, labels, epochs=10, batch_size=32)\n\n\n\t\n\t\n\treturn\n\n# plt.show()\ndef read_parser(argv, Inputs, InputsOpt_Defaults):\n\tInputs_opt = [key for key in InputsOpt_Defaults]\n\tDefaults = [InputsOpt_Defaults[key] for key in InputsOpt_Defaults]\n\tparser = ArgumentParser()\n\tfor element in (Inputs + Inputs_opt):\n\t\tprint(element)\n\t\tif element == 'no_element':\n\t\t\tparser.add_argument('--' + element, nargs='+')\n\t\telse:\n\t\t\tparser.add_argument('--' + element, nargs='?')\n\t\n\targs = parser.parse_args()\n\tconfig = {}\n\tfor element in Inputs:\n\t\tif getattr(args, element) != None:\n\t\t\tconfig[element] = getattr(args, element)\n\t\telse:\n\t\t\tprint('Required:', element)\n\t\t\tsys.exit()\n\n\tfor element, value in zip(Inputs_opt, Defaults):\n\t\tif getattr(args, element) != None:\n\t\t\tconfig[element] = getattr(args, element)\n\t\telse:\n\t\t\tprint('Default ' + element + 
' = ', value)\n\t\t\tconfig[element] = value\n\n\treturn config\n\n\nif __name__ == '__main__':\n\tmain(sys.argv)\n","repo_name":"feleaman/nico","sub_path":"tf_rnn_test.py","file_name":"tf_rnn_test.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"23367945182","text":"# -*- encoding: utf-8 -*-\n\nfrom abc import ABCMeta, abstractmethod\n\nfrom cwr_webclient.report import mera as mera_reporter\nfrom cwr_webclient.report import cwr as cwr_reporter\n\n\"\"\"\nOffers services for pagination.\n\"\"\"\n\n__author__ = 'Bernardo Martínez Garrido'\n__license__ = 'MIT'\n__status__ = 'Development'\n\n\nclass ReportService(object):\n __metaclass__ = ABCMeta\n\n def __init__(self):\n pass\n\n @abstractmethod\n def generate_report_excel(self, data, filename):\n raise NotImplementedError(\n 'The generate_report_excel method must be implemented')\n\n\nclass MeraReportService(ReportService):\n def generate_report_excel(self, data, filename):\n return mera_reporter.generate_match_report_excel(data, filename)\n\n\nclass CWRReportService(ReportService):\n def generate_report_excel(self, data, filename):\n return cwr_reporter.generate_cwr_report_excel(data)\n","repo_name":"weso/CWR-WebClient","sub_path":"cwr_webclient/service/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"50"} +{"seq_id":"19260204815","text":"# -*- coding: utf-8 -*-\n\nfrom django.views.generic import View\n\nfrom muayene.views import detail, form\n\n\nclass MuayeneBaseView(View):\n \"\"\"\n According to request return different views.\n\n GET request:\n DetailView for muayene entry.\n\n URL: /muayene/\n : pk of muayene entry.\n\n POST request:\n DetailView have 4 different forms (recete, rapor, lab, file).\n Returns specific FormView according to submit type input's name.\n\n * get_kullanim is for ajax request used by ReceteFormView.\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n view = detail.MuayeneDetailView.as_view()\n\n return view(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n if \"recete_form\" in request.POST:\n view = form.ReceteFormView.as_view()\n elif \"rapor_form\" in request.POST:\n view = form.RaporFormView.as_view()\n elif \"lab_form\" in request.POST:\n view = form.LabIstekFormView.as_view()\n elif \"file_form\" in request.POST:\n view = form.FileUploadFormView.as_view()\n elif \"get_kullanim\" == request.POST.get(\"action\", \"\"):\n view = form.GetIlacKullanimView.as_view()\n\n return view(request, *args, **kwargs)\n","repo_name":"egegunes/hastatakip","sub_path":"src/muayene/views/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"} +{"seq_id":"34654550344","text":"_author_ = 'jake'\n_project_ = 'leetcode'\n\n# https://leetcode.com/problems/grid-illumination/\n# On a N x N grid of cells, each cell (x, y) with 0 <= x < N and 0 <= y < N has a lamp.\n# Initially, some number of lamps are on. 
lamps[i] tells us the location of the i-th lamp that is on.\n# Each lamp that is on illuminates every square on its x-axis, y-axis, and both diagonals (similar to a Queen in chess).\n# For the i-th query queries[i] = (x, y), the answer to the query is 1 if the cell (x, y) is illuminated, else 0.\n# After each query (x, y) [in the order given by queries], we turn off any lamps that are at cell (x, y) or are\n# adjacent 8-directionally (ie., share a corner or edge with cell (x, y).)\n# Return an array of answers. Each value answer[i] should be equal to the answer of the i-th query queries[i].\n\n# For each of the 4 directions (horizontal, vertical and 2 diagonals) count the number of lamps in each line.\n# For each query call, sum the number of lamps shining on that cell across all 4 directions.\n# If there is at least one lamp shining on the query cell, check the 9 cells in and around the query for lamps and\n# decrement the count in each direction for each lamp found.\n# Time - O(m + n), nb lamps + nb queries\n# Space - O(m)\n\nfrom collections import defaultdict\n\nclass Solution(object):\n def gridIllumination(self, N, lamps, queries):\n \"\"\"\n :type N: int\n :type lamps: List[List[int]]\n :type queries: List[List[int]]\n :rtype: List[int]\n \"\"\"\n lamps = {tuple(lamp) for lamp in lamps} # convert to set of O(1) lookup\n x_lamps, y_lamps = defaultdict(int), defaultdict(int) # x_lamps[i] is the count of lamps with x-value of i\n up_diag_lamps, down_diag_lamps = defaultdict(int), defaultdict(int)\n\n for x, y in lamps:\n x_lamps[x] += 1\n y_lamps[y] += 1\n up_diag_lamps[x - y] += 1\n down_diag_lamps[x + y] += 1\n\n result = []\n for x, y in queries:\n illuminated = x_lamps[x] + y_lamps[y] + up_diag_lamps[x - y] + down_diag_lamps[x + y]\n result.append(min(illuminated, 1)) # result of 1 if at least one lamp illuminating x, y\n\n if illuminated != 0:\n for dx in [-1, 0, 1]:\n for dy in [-1, 0, 1]:\n if (x + dx, y + dy) in lamps:\n lamps.discard((x + dx, y + dy))\n x_lamps[x + dx] -= 1\n y_lamps[y + dy] -= 1\n up_diag_lamps[x + dx - y - dy] -= 1\n down_diag_lamps[x + dx + y + dy] -= 1\n\n return result\n","repo_name":"jakehoare/leetcode","sub_path":"python_1001_to_2000/1001_Grid_Illumination.py","file_name":"1001_Grid_Illumination.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"50"} +{"seq_id":"829330868","text":"import numpy as np\nimport pandas as pd\nimport os\nimport json\nimport cv2\nfrom opt import opt\nargs = opt\n\nif __name__ == \"__main__\":\n\n # Loading the json file from the given path.\n input_path = args.inputpath\n json_path = os.path.join(input_path,\"keypoints.json\")\n json_data = json.load(open(json_path))\n original_path = input_path.split('/')[-1]\n\n \n # For each entry in the json file, we are preparing the co-ordinates of the keypoints.\n image_paths = []\n points = []\n for data in json_data:\n image_id = data['image_id']\n path = os.path.join(original_path,image_id)\n img = cv2.imread(path)\n image_shape = img.shape\n keypoints = data['keypoints']\n image_paths.append(image_id)\n cur_points = []\n cur_points.append(tuple([image_shape[0],image_shape[1]]))\n for j in range(0,len(keypoints),3):\n cur_points.append(tuple([keypoints[j],keypoints[j+1]]))\n points.append(cur_points)\n\n point_names = [\n \"image_shape\",\"Nose\",\"LEye\",\"REye\",\"LEar\",\"REar\",\"LShoulder\",\n \"RShoulder\",\"LElbow\",\"RElbow\",\"LWrist\",\"RWrist\",\n 
\"LHip\",\"RHip\",\"LKnee\",\"RKnee\",\"LAnkle\",\"RAnkle\"\n ]\n \n # Creating a dataframe with the co-ordinates.\n final_points = pd.DataFrame(points,columns=point_names)\n final_paths = pd.DataFrame(image_paths,columns = [\"image_path\"])\n final = pd.concat((final_paths,final_points),axis=1)\n \n # Loading the data.csv file to get the classes of each images.\n data_path = os.path.join(input_path,\"data.csv\")\n df = pd.read_csv(data_path)\n \n # Merging both the dataframes such that every image gets its corresponding class.\n last = pd.merge(final,df,on=\"image_path\")\n output_path = os.path.join(args.outputpath,\"keypoints.csv\")\n \n # Saving the dataframe as csv file.\n last.to_csv(output_path,index=None)\n","repo_name":"i-m-arin/yoga_pose_classification","sub_path":"prepare_keypoints.py","file_name":"prepare_keypoints.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"9259322809","text":"import os\nfrom os.path import exists, join\nimport logging\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.datasets._base import _fetch_remote\n\nfrom .dataset import Dataset\nfrom ._utils import _mkdirp, RemoteFileMetadata, get_data_home\n\n\nARCHIVE = RemoteFileMetadata(\n filename=\"default_cb.xls\",\n url=\"https://archive.ics.uci.edu/ml/machine-learning-databases/00350/default%20of%20credit%20card%20clients.xls\",\n checksum=\"30c6be3abd8dcfd3e6096c828bad8c2f011238620f5369220bd60cfc82700933\",\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef _fetch_default_cb(download_if_missing=True):\n data_home = get_data_home()\n data_dir = join(data_home, \"default_cb\")\n data_path = join(data_dir, \"default_cb.csv.gz\")\n\n if download_if_missing and not exists(data_path):\n _mkdirp(data_dir)\n logger.info(\"Downloading %s\" % ARCHIVE.url)\n _fetch_remote(ARCHIVE, dirname=data_dir)\n logger.debug(\"Converting as a single dataframe with the correct schema\")\n filepath = join(data_dir, ARCHIVE.filename)\n\n df = pd.read_excel(filepath, skiprows=[0])\n\n df.to_csv(data_path, compression=\"gzip\", index=False)\n # Remove temporary files\n os.remove(filepath)\n\n\ndef load_default_cb(download_if_missing=True):\n # Fetch the data is necessary\n _fetch_default_cb(download_if_missing)\n\n data_home = get_data_home()\n data_dir = join(data_home, \"default_cb\")\n data_path = join(data_dir, \"default_cb.csv.gz\")\n\n dtype = {\n \"LIMIT_BAL\": np.int,\n \"SEX\": \"category\",\n \"EDUCATION\": \"category\",\n \"MARRIAGE\": \"category\",\n \"AGE\": np.int,\n # We consider the PAY_* features as continuous, otherwise some modalities are\n # very rare and lead to problems in train/test splitting\n \"PAY_0\": np.int,\n \"PAY_2\": np.int,\n \"PAY_3\": np.int,\n \"PAY_4\": np.int,\n \"PAY_5\": np.int,\n \"PAY_6\": np.int,\n \"BILL_AMT1\": np.int,\n \"BILL_AMT2\": np.int,\n \"BILL_AMT3\": np.int,\n \"BILL_AMT4\": np.int,\n \"BILL_AMT5\": np.int,\n \"BILL_AMT6\": np.int,\n \"PAY_AMT1\": np.int,\n \"PAY_AMT2\": np.int,\n \"PAY_AMT3\": np.int,\n \"PAY_AMT4\": np.int,\n \"PAY_AMT5\": np.int,\n \"PAY_AMT6\": np.int,\n }\n dataset = Dataset.from_dtype(\n name=\"default-cb\",\n task=\"binary-classification\",\n label_column=\"default payment next month\",\n dtype=dtype,\n drop_columns=[\"ID\"],\n )\n return dataset.load_from_csv(data_path, 
dtype=dtype)\n","repo_name":"linlearn/linlearn","sub_path":"linlearn/datasets/_default_cb.py","file_name":"_default_cb.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"50"} +{"seq_id":"17455774060","text":"from collections import namedtuple\nimport numpy as np\nfrom beamngpy import Vehicle, BeamNGpy\nfrom beamngpy.sensors import Electrics, Timer, Sensor\nfrom typing import List, Tuple\n\nVehicleStateProperties = ['timer', 'pos', 'dir', 'vel', 'steering', 'steering_input',\n 'brake', 'brake_input', 'throttle', 'throttle_input', \n 'wheelspeed', 'vel_kmh']\n\nVehicleState = namedtuple('VehicleState', VehicleStateProperties)\n\n\nclass VehicleStateReader:\n def __init__(self, vehicle: Vehicle, beamng: BeamNGpy, additional_sensors: List[Tuple[str, Sensor]] = None):\n self.vehicle = vehicle\n self.beamng = beamng\n self.state: VehicleState = None\n self.vehicle_state = {}\n\n electrics = Electrics()\n timer = Timer()\n\n self.vehicle.attach_sensor('electrics', electrics)\n self.vehicle.attach_sensor('timer', timer)\n\n if additional_sensors:\n for (name, sensor) in additional_sensors:\n self.vehicle.attach_sensor(name, sensor)\n\n def get_state(self) -> VehicleState:\n return self.state\n\n def get_vehicle_bbox(self) -> dict:\n return self.vehicle.get_bbox()\n\n def update_state(self):\n sensors = self.beamng.poll_sensors(self.vehicle)\n self.sensors = sensors\n\n self.vehicle.update_vehicle()\n st = self.vehicle.state\n\n ele = sensors['electrics']\n\n vel = tuple(st['vel'])\n self.state = VehicleState(timer=sensors['timer']['time']\n , pos=tuple(st['pos'])\n , dir=tuple(st['dir'])\n , vel=vel\n , steering=ele.get('steering', None)\n , steering_input=ele.get('steering_input', None)\n , brake=ele.get('brake', None)\n , brake_input=ele.get('brake_input', None)\n , throttle=ele.get('throttle', None)\n , throttle_input=ele.get('throttle_input', None)\n , wheelspeed=ele.get('wheelspeed', None)\n , vel_kmh=int(round(np.linalg.norm(vel) * 3.6)))\n","repo_name":"testingautomated-usi/DeepHyperion","sub_path":"DeepHyperion-BNG/self_driving/vehicle_state_reader.py","file_name":"vehicle_state_reader.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"4"} +{"seq_id":"39532447980","text":"import pygame, sys\npygame.init()\n\ndef count():\n\n backImg = pygame.image.load('Graphics/Background_Pause.jpg')\n backImg = pygame.transform.scale(backImg, (1280, 700))\n back = pygame.Rect(0, 0, 700, 1280)\n \n blank = pygame.Rect(427,87, 500,426)\n\n size = [1280, 700]\n screen = pygame.display.set_mode(size)\n \n pygame.display.set_caption(\"COUNTDOWN\")\n clock = pygame.time.Clock()\n \n count = 3\n\n while count > 0:\n \n for event in pygame.event.get():\n \n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit(0)\n \n screen.blit(backImg, back)\n \n count -= 1\n \n if count == 0:\n break\n\n \n numImg = pygame.image.load('Graphics/Countdown/num-'+str(count)+'.gif')\n numImg = pygame.transform.scale(numImg, (426,500))\n \n screen.blit(numImg,blank)\n \n pygame.display.flip()\n \n clock.tick(1)\n \n\ncount()","repo_name":"thebazman1998/School-Projects","sub_path":"Grade 10/Unit 2 Game/count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"32693962255","text":"from datetime import datetime\r\nfrom PIL import 
Image\r\nimport pandas as pd\r\nimport plotly.express as px\r\nimport streamlit as st\r\n\r\nst.set_page_config(page_title='Visão Entregadores',\r\n page_icon='🚚', layout='wide')\r\n\r\n\r\ndef clean_database(dataframe):\r\n '''Esta função tem a responsabilidade de limpar o dataframe\r\n Tipos de limpeza:\r\n 1 - Remoção dos dados NaN\r\n 2 - Mudança do tipo da coluna de dados\r\n 3 - Remoção dos espaços das variáveis de texto\r\n 4 - Formatação da coluna de datas\r\n 5 - Limpeza da coluna de tempo (remoção do texto da variável numérica)\r\n\r\n Input: Dataframe\r\n Output: Dataframe\r\n '''\r\n # Removendo linhas com NaN da base de dados\r\n linhas_selecionadas = dataframe['Delivery_person_Age'] != 'NaN '\r\n dataframe = dataframe.loc[linhas_selecionadas, :].copy()\r\n\r\n linhas_selecionadas = dataframe['multiple_deliveries'] != 'NaN '\r\n dataframe = dataframe.loc[linhas_selecionadas, :].copy()\r\n\r\n linhas_selecionadas = dataframe['Road_traffic_density'] != 'NaN '\r\n dataframe = dataframe.loc[linhas_selecionadas, :].copy()\r\n\r\n linhas_selecionadas = dataframe['City'] != 'NaN '\r\n dataframe = dataframe.loc[linhas_selecionadas, :].copy()\r\n\r\n linhas_selecionadas = dataframe['Festival'] != 'NaN '\r\n dataframe = dataframe.loc[linhas_selecionadas, :].copy()\r\n\r\n # Remover o texto de numeros --> '(min) 24'\r\n dataframe['Time_taken(min)'] = dataframe['Time_taken(min)'].apply(\r\n lambda x: x.split('(min) ')[1])\r\n\r\n # Converter os tipos das colunas\r\n dataframe['Delivery_person_Age'] = dataframe['Delivery_person_Age'].astype(\r\n int)\r\n\r\n dataframe['multiple_deliveries'] = dataframe['multiple_deliveries'].astype(\r\n int)\r\n\r\n dataframe['Delivery_person_Ratings'] = dataframe['Delivery_person_Ratings'].astype(\r\n float)\r\n\r\n dataframe['Time_taken(min)'] = dataframe['Time_taken(min)'].astype(int)\r\n\r\n # Precisa usar a biblioteca pandas quando se trata de converter em data\r\n dataframe['Order_Date'] = pd.to_datetime(\r\n dataframe['Order_Date'], format='%d-%m-%Y')\r\n\r\n # Resetar o index\r\n dataframe = dataframe.reset_index(drop=True)\r\n\r\n # Remover os espaços em branco dentro das strings de ID\r\n dataframe.loc[:, 'ID'] = dataframe.loc[:, 'ID'].str.strip()\r\n dataframe.loc[:, 'Road_traffic_density'] = dataframe.loc[:,\r\n 'Road_traffic_density'].str.strip()\r\n dataframe.loc[:, 'Type_of_order'] = dataframe.loc[:,\r\n 'Type_of_order'].str.strip()\r\n dataframe.loc[:, 'Type_of_vehicle'] = dataframe.loc[:,\r\n 'Type_of_vehicle'].str.strip()\r\n dataframe.loc[:, 'City'] = dataframe.loc[:, 'City'].str.strip()\r\n dataframe.loc[:, 'Festival'] = dataframe.loc[:, 'Festival'].str.strip()\r\n\r\n return dataframe\r\n\r\n\r\ndef top_delivers(df, top_asc):\r\n df = df.loc[:, ['Delivery_person_ID', 'City', 'Time_taken(min)']].groupby(\r\n ['City', 'Delivery_person_ID']).max().sort_values(['City', 'Time_taken(min)'], ascending=top_asc).reset_index()\r\n\r\n df_aux1 = df.loc[df['City'] == 'Metropolitian', :].head(10)\r\n df_aux2 = df.loc[df['City'] == 'Urban', :].head(10)\r\n df_aux3 = df.loc[df['City'] == 'Semi-Urban', :].head(10)\r\n\r\n df = pd.concat([df_aux1, df_aux2, df_aux3]).reset_index()\r\n\r\n return df\r\n\r\n\r\n# Import dataset\r\ndf = pd.read_csv('./dataset/train.csv')\r\n\r\n# Limpando os dados\r\ndf1 = clean_database(df)\r\n\r\n# ===============================================================================\r\n# Barra lateral - Streamlit\r\n# ===============================================================================\r\n\r\nst.header('Marketplace 
- Visão Empresa')\r\n\r\nimage_path = './image/logo.jpg'\r\nimage = Image.open(image_path)\r\nst.sidebar.image(image, width=200)\r\n\r\nst.sidebar.markdown('# Curry Company')\r\nst.sidebar.markdown('## Fastest Delivery in Town')\r\nst.sidebar.markdown('''---''')\r\n\r\n\r\nst.sidebar.markdown('## Selecione uma data limite')\r\ndate_slider = st.sidebar.slider(\r\n 'Até qual valor?',\r\n value=datetime(2022, 4, 13),\r\n min_value=datetime(2022, 2, 11),\r\n max_value=datetime(2022, 4, 6),\r\n format='DD-MM-YYYY'\r\n)\r\n\r\nst.sidebar.markdown('''---''')\r\n\r\ntraffic_options = st.sidebar.multiselect(\r\n 'Quais as condições do trânsito',\r\n ['Low', 'Medium', 'High', 'Jam'],\r\n default=['Low', 'Medium', 'High', 'Jam']\r\n)\r\n\r\nst.sidebar.markdown('''---''')\r\nst.sidebar.markdown('### Powered by Robson ❤️')\r\n\r\n# =========================\r\n# Filtros\r\n# =========================\r\n# Filtro de data\r\nlinhas_selecionadas = df1['Order_Date'] < date_slider\r\ndf1 = df1.loc[linhas_selecionadas, :]\r\n\r\n# Filtro de tânsito\r\nlinhas_selecionadas = df1['Road_traffic_density'].isin(traffic_options)\r\ndf1 = df1.loc[linhas_selecionadas, :]\r\n\r\n# ===============================================================================\r\n# layout - Streamlit\r\n# ===============================================================================\r\n\r\ntab1, tab2, tab3 = st.tabs(['Visão Gerencial', '_', '_'])\r\n\r\nwith tab1:\r\n with st.container():\r\n st.title('Overall Metrics')\r\n col1, col2, col3, col4 = st.columns(4, gap='large')\r\n\r\n with col1:\r\n # st.subheader('Maior idade')\r\n # Maior idade dos entregadores\r\n maior_idade = df1.loc[:, 'Delivery_person_Age'].max()\r\n col1.metric(' Maior Idade', maior_idade)\r\n\r\n with col2:\r\n # st.subheader('Menor idade')\r\n # Menor idade dos entregadores\r\n menor_idade = df1.loc[:, 'Delivery_person_Age'].min()\r\n col2.metric(' Menor idade', menor_idade)\r\n\r\n with col3:\r\n # st.subheader('Melhor condição de veículos')\r\n # condições dos veiculos\r\n melhor_condicao = df1.loc[:, 'Vehicle_condition'].max()\r\n col3.metric('Melhor condição de veículo', melhor_condicao)\r\n\r\n with col4:\r\n # st.subheader('Pior condição de veículos')\r\n # condições dos veiculos\r\n pior_condicao = df1.loc[:, 'Vehicle_condition'].min()\r\n col4.metric('Pior condição de veículo', pior_condicao)\r\n\r\n with st.container():\r\n st.markdown('''---''')\r\n st.title('Avaliações')\r\n\r\n col1, col2 = st.columns(2)\r\n\r\n with col1:\r\n st.markdown('##### Avalicao medias por Entregador')\r\n df_avg_ratings_per_deliver = (df1.loc[:, ['Delivery_person_Ratings', 'Delivery_person_ID']]\r\n .groupby('Delivery_person_ID')\r\n .mean()\r\n .reset_index())\r\n st.dataframe(df_avg_ratings_per_deliver)\r\n\r\n with col2:\r\n st.markdown(' ##### Avalisção média por transito')\r\n df_avg_std_rating_by_traffic = (df1.loc[:, ['Delivery_person_Ratings', 'Road_traffic_density']].groupby(\r\n 'Road_traffic_density').agg({'Delivery_person_Ratings': ['mean', 'std']}))\r\n\r\n df_avg_std_rating_by_traffic.columns = [\r\n 'delirery_mean', 'delirery_std']\r\n\r\n df_avg_std_rating_by_traffic.reset_index()\r\n st.dataframe(df_avg_std_rating_by_traffic)\r\n\r\n st.markdown(' ##### Avalisção média por clima')\r\n df_avg_std_rating_by_wather = df1.loc[:, ['Delivery_person_Ratings', 'Weatherconditions']].groupby(\r\n 'Weatherconditions').agg({'Delivery_person_Ratings': ['mean', 'std']})\r\n\r\n df_avg_std_rating_by_wather.columns = [\r\n 'delirery_mean', 'delirery_std']\r\n\r\n 
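# Note: reset_index() on the line below returns a new DataFrame; since the result is not assigned back, the frame passed to st.dataframe keeps its groupby index.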
df_avg_std_rating_by_wather.reset_index()\r\n st.dataframe(df_avg_std_rating_by_wather)\r\n\r\n with st.container():\r\n st.markdown('''---''')\r\n st.title('Velocidade de Entrega')\r\n\r\n col1, col2 = st.columns(2)\r\n with col1:\r\n st.subheader('Top entregadores mais rápidos')\r\n df2 = top_delivers(df1, top_asc=True)\r\n st.dataframe(df2)\r\n\r\n with col2:\r\n st.subheader('Top entregadores mais lentos')\r\n df2 = top_delivers(df1, top_asc=False)\r\n st.dataframe(df2)\r\n","repo_name":"robsonlopesjr/curry-company","sub_path":"pages/02_visao_entregadores.py","file_name":"02_visao_entregadores.py","file_ext":"py","file_size_in_byte":8351,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"72483216756","text":"from typing import Any, Union, List, Dict, cast\n\nimport torch\nimport torch.nn as nn\n\n__all__ = ['VGG', 'vgg16', 'vgg16_bn']\n\n\nclass VGG(nn.Module):\n \"\"\"\n 参考:torchvision.models.VGG\n \"\"\"\n\n def __init__(self, features: nn.Module, num_classes: int = 1000) -> None:\n super(VGG, self).__init__()\n\n self.features = features\n self.avgpool = nn.AdaptiveAvgPool2d((7, 7))\n self.classifier = nn.Sequential(\n nn.Linear(512 * 7 * 7, 4096), nn.ReLU(inplace=True),\n nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(inplace=True),\n nn.Dropout(), nn.Linear(4096, num_classes),\n )\n\n # Official init from torch repo.\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n # nn.init.kaiming_normal_(m.weight)\n nn.init.xavier_uniform_(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n # nn.init.normal_(m.weight, 0.01)\n nn.init.xavier_uniform_(m.weight)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.features(x)\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.classifier(x)\n return x\n\n\ndef _make_layers(cfg: List[Union[str, int]], in_planes: int = 3, batch_norm: bool = False) -> nn.Sequential:\n layers: List[nn.Module] = []\n\n for v in cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n v = cast(int, v)\n conv2d = nn.Conv2d(in_planes, v, kernel_size=3, padding=1)\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_planes = v\n return nn.Sequential(*layers)\n\n\ncfgs: Dict[str, List[Union[str, int]]] = {\n 'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n 'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n}\n\n\ndef _vgg(cfg: str, in_planes: int, batch_norm: bool, **kwargs: Any) -> VGG:\n return VGG(_make_layers(cfgs[cfg], in_planes, batch_norm=batch_norm), **kwargs)\n\n\ndef vgg16(in_planes: int = 3, **kwargs: Any) -> VGG:\n return _vgg('D', in_planes, False, **kwargs)\n\n\ndef vgg16_bn(in_planes: int = 3, **kwargs: Any) -> VGG:\n return _vgg('D', in_planes, True, **kwargs)\n\n\nif __name__ == '__main__':\n x = torch.randn(([32, 3, 224, 224]))\n net = vgg16()\n for layer in net.features:\n x = layer(x)\n print(layer.__class__.__name__, 'output shape: 
\\t', x.shape)\n x = net.avgpool(x)\n print(net.avgpool.__class__.__name__, 'output shape: \\t', x.shape)\n x = torch.flatten(x, 1)\n for layer in net.classifier:\n x = layer(x)\n print(layer.__class__.__name__, 'output shape: \\t', x.shape)\n","repo_name":"gnudennis/applied-image-processing-with-deep-learning","sub_path":"aip/models/vgg.py","file_name":"vgg.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"39692659062","text":"\"\"\" unit_tests \"\"\"\n\nfrom django.test import TestCase\n\nfrom construction_work.generic_functions.hashing import Hashing\n\n\nclass TestHashing(TestCase):\n \"\"\"Unittest for convenient library hashing\"\"\"\n\n def test_make_md5_hash(self):\n \"\"\"Test create md5 hash\"\"\"\n data = \"mock\"\n hashing = Hashing()\n result = hashing.make_md5_hash(data)\n\n self.assertEqual(result, \"17404a596cbd0d1e6c7d23fcd845ab82\")\n\n def test_make_sha1_hash(self):\n \"\"\"Test create sha1 hash\"\"\"\n data = \"mock\"\n hashing = Hashing()\n result = hashing.make_sha1_hash(data)\n\n self.assertEqual(len(result), 40)\n","repo_name":"Amsterdam/amsterdam-app-construction-work","sub_path":"construction_work/unit_tests/tests_hashing.py","file_name":"tests_hashing.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"13477132341","text":"n = int(input())\nx, y = 0, n - 1\nlx, ly = 0, 0\nspir = [[0] * n for i in range(n)]\nflag = n\ndl = n - 1\n\n\ndef pr(data: list):\n for i in data:\n print(i)\n print()\n\n\nfor i in range(n):\n spir[0][i] = i + 1\n\nwhile True:\n for r in range(dl):\n x += 1\n flag += 1\n spir[x][y] = flag\n pr(spir)\n for c in range(dl):\n y -= 1\n flag += 1\n spir[x][y] = flag\n pr(spir)\n dl -= 1\n for r in range(dl):\n x -= 1\n flag += 1\n spir[x][y] = flag\n pr(spir)\n for c in range(dl):\n y += 1\n flag += 1\n spir[x][y] = flag\n pr(spir)\n if flag == n*n:\n break\n dl -= 1\n\n\n # 00 01 02 03 04\n # 10 11 12 13 14\n # 20 21 22 23 24\n # 30 31 32 33 34\n # 40 41 42 43 44\n\nprint(spir)","repo_name":"Aliastrip/Study","sub_path":"2.6.5.py","file_name":"2.6.5.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"7868673174","text":"n = int(input())\n\nl = []\n\nfor _ in range(n):\n l.append(int(input()))\n\ndp=[0]*(n) # dp 리스트\nif len(l)<=2: # 계단이 2개 이하일땐 그냥 다 더해서 출력\n print(sum(l))\nelse: # 계단이 3개 이상일 때\n dp[0]=l[0] # 첫째 계단 수동 계산\n dp[1]=l[0]+l[1] # 둘째 계단까지 수동 계산\n for i in range(2,n): # 3번째 계단 부터 dp 점화식 이용해서 최대값 구하기\n dp[i]=max(dp[i-3]+l[i-1]+l[i], dp[i-2]+l[i])\n print(dp[-1])","repo_name":"4RG0S/2023-Hamgorithm-Spring","sub_path":"202102680/2579.py","file_name":"2579.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"ko","doc_type":"code","stars":7,"dataset":"github-code","pt":"4"} +{"seq_id":"17426778550","text":"#!/usr/bin/env python\n# coding=utf-8\nimport os\nimport sys\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(BASE_DIR)\nfrom lib import bin\nfrom modules.student import Student\nfrom conf.setting import login_info\n\n\ndef login(func):\n def wrapper(*args, **kwargs):\n if not login_info['isLogin']:\n print('你还没有登录')\n print('你现在只有两条路,登录1和注册2')\n ch = input('请输入你的选择: ')\n if ch == '1':\n stus = bin.get_objs(Student)\n name = input('请输入你的学员名称')\n flag = False\n for stu in 
stus:\n if stu.uaccount == name:\n pwd = input('请输入你的密码')\n if stu.upwd == pwd:\n flag = True\n print('登录成功')\n login_info['uaccount'] = stu\n login_info['isLogin'] = True\n login_info['type'] = 'Student'\n return func(*args, **kwargs)\n else:\n print('密码错误')\n return\n if not flag:\n print('不存在的用户名')\n return\n elif ch == '2':\n s = Student.register()\n if s is None:\n return\n login_info['uaccount'] = s\n login_info['isLogin'] = True\n login_info['type'] = 'Student'\n return func(*args, **kwargs)\n\n return wrapper\n\n\n@login\ndef student_view():\n s = login_info['uaccount']\n while True:\n print('学生操作选项'.center(50, '*'))\n print('1.报名课程')\n print('2.查看成绩')\n print('q.退出')\n ch = input('请输入要操作的选项: ')\n if ch == 'q':\n break\n if ch == '1':\n s.enroll_to_class()\n elif ch == '2':\n s.show_scores()\n else:\n print('选项输入错误')\n\nif __name__=='__main__':\n student_view()\n","repo_name":"5oe/djangos","sub_path":"others/homework/core/student_enter.py","file_name":"student_enter.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"1745167936","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 3 22:32:51 2021\n\n@author: igorvanloo\n\"\"\"\n\n'''\nProject Euler Problem 87\n\nThe smallest number expressible as the sum of a prime square, prime cube, and prime fourth power is 28. \nIn fact, there are exactly four numbers below fifty that can be expressed in such a way:\n\n28 = 2^2 + 2^3 + 2^4\n33 = 32 + 23 + 24\n49 = 52 + 23 + 24\n47 = 22 + 33 + 24\n\nHow many numbers below fifty million can be expressed as the sum of a prime square, prime cube, and prime fourth power?\n\nReasoning:\n First ill test to see which primes number is the closest when squared, cubed, and 4th powered\n 7069^2 = 49970761\n 367^3 = 49430863\n 83^4 = 47458321\n \n so we can create a simple loop and add up all the numbers that are less than 50,000,000\n \nAnwser:\n 1097343\n--- 1.5371170043945312 seconds ---\n \n'''\n\nimport time\nfrom eulerlib import primes\nstart_time = time.time()\n\ndef compute():\n squared_prime_numbers = primes(7070)\n cubed_prime_numbers = primes(368)\n fourth_prime_numbers = primes(84)\n numbers = []\n \n for x in squared_prime_numbers:\n for y in cubed_prime_numbers:\n for z in fourth_prime_numbers:\n anw = x**2 + y**3 + z**4\n if anw < 40000000:\n numbers.append(anw)\n \n return len(set(numbers))\n \n\nif __name__ == \"__main__\":\n print(compute())\n print(\"--- %s seconds ---\" % (time.time() - start_time))","repo_name":"igorvanloo/Project-Euler-Explained","sub_path":"Finished Problems/pe00087 - Prime power triples.py","file_name":"pe00087 - Prime power triples.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"4"} +{"seq_id":"27833691773","text":"import discord\r\nfrom discord.ext import commands\r\n\r\nclass Ping(commands.Cog):\r\n def __init__(self, bot):\r\n self.bot = bot\r\n @commands.command()\r\n async def ping(self, ctx):\r\n \r\n latency = round(self.bot.latency * 1000)\r\n\r\n if latency <= 199:\r\n latencyColor = 0x3be801\r\n elif 199 < latency < 499:\r\n latencyColor = 0xff6600\r\n else:\r\n latencyColor = 0xE80303\r\n\r\n embed = discord.Embed(color=latencyColor)\r\n embed.add_field(name='Pong!', value=f\"Bot latency: {latency} ms \", inline=False)\r\n \r\n await ctx.send(embed=embed)\r\n \r\n\r\ndef setup(bot):\r\n bot.add_cog(Ping(bot))\r\n 
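The Project Euler 87 solution above depends on eulerlib and, despite the fifty-million target in its docstring, filters against 40000000. A self-contained sketch using a plain sieve and the 50,000,000 bound from the problem statement:

def primes_below(n):
    sieve = [True] * n
    sieve[0], sieve[1] = False, False
    for i in range(2, int(n ** 0.5) + 1):
        if sieve[i]:
            sieve[i * i::i] = [False] * len(sieve[i * i::i])
    return [i for i, is_prime in enumerate(sieve) if is_prime]

LIMIT = 50 * 10 ** 6
results = set()
for z in primes_below(int(LIMIT ** 0.25) + 1):         # fourth powers, primes up to 83
    for y in primes_below(int(LIMIT ** (1 / 3)) + 1):  # cubes, primes up to 367
        for x in primes_below(int(LIMIT ** 0.5) + 1):  # squares, primes up to 7069
            total = x ** 2 + y ** 3 + z ** 4
            if total < LIMIT:
                results.add(total)
print(len(results))  # 1097343, matching the answer recorded above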
","repo_name":"J0k3rrWild/XantBot","sub_path":"cogs/Ping-1,0.py","file_name":"Ping-1,0.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"33433827997","text":"import psycopg2\nimport psycopg2.extras\n\nclass database():\n def __init__(self) -> None:\n \"\"\"\n Initiate the database class by creating a cursor that connects to a (in this case) local database\n \"\"\" \n self.cursor = self.getCursor()\n \n def getCursor(self) -> object:\n \"\"\"Creates a connection curse to perform operations in the database\n\n Returns:\n cursor (object): connection cursor\n \"\"\" \n\n conn = psycopg2.connect(database=\"student details\",user=\"postgres\",password=\"12345\",host=\"localhost\",port=\"5432\")\n conn.autocommit = True\n cur = conn.cursor()\n self.conn = conn\n\n return cur\n\n def executeQuery(self, query, tup : tuple = None) -> None:\n \"\"\"Executes a query/Performs an operation in postgresql \n\n Args:\n query (str): the query to be executed\n tup (tuple, optional): A tuple that can be mapped to format values in the query. Defaults to None.\n \"\"\" \n\n if tup != None:\n self.cursor.execute(query, tup)\n else:\n self.cursor.execute(query)\n\n \n def selectUID(self, uid: str) -> tuple:\n \"\"\"Gives the row for the provided UID\n\n Args:\n uid (str): the user ID to identify a row\n\n Returns:\n tuple: resulted row that is found\n \"\"\" \n\n \"\"\"tuple- (id, name, addr, cgpa, 10th%, 12th%, remarks, phone)\"\"\"\n query = \"SELECT * FROM student_info WHERE uid = %s\"\n self.executeQuery(query, tup = (uid,)) \n\n res = self.cursor.fetchall()[0]\n return res\n \n def insertValues(self, uid: str, name: str, address: str,\n curCGPA: float, percent10th: float, \n percent12th: float, remarks: str, phone: str) -> None:\n \"\"\"\n A function to insert values in a row\n Args:\n uid (str): User ID (primary key)\n name (str): Name of the user\n address (str): Address of the user\n curCGPA (float): current CGPA of the user\n percent10th (float): 10th percentage of the user\n percent12th (float): 12th percentage of the user\n remarks (str): remarks by the mentor\n phone (str): Phone no. 
of the user\n \"\"\" \n\n query = \"INSERT INTO student_info VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\"\n self.executeQuery(query, tup = (uid, name, address, curCGPA, percent10th, percent12th, remarks, phone))\n\n\ndb = database()\nif __name__ == \"__main__\":\n db.insertValues('6687', 'Aaryan', 'Somewhere', 8.6, 98.5, 99.9, 'Big', '9891558594')\n print(db.selectUID('6668'))","repo_name":"4RCAN3/FaceCheck","sub_path":"dataEntry.py","file_name":"dataEntry.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"552957252","text":"import pytest\nimport requests_mock\nfrom tests.bch.objects import obj_bitdbquery\nfrom tests.system.objects import obj_save_record\n\n@pytest.mark.django_db\ndef test_bitdbquery_transaction(requests_mock, monkeypatch, capsys):\n source = 'bitdbquery'\n # Test BitDBQuery from tasks.py\n script = obj_bitdbquery.BitDBQueryTest(requests_mock, capsys)\n script.test()\n\n # Test recording of Transaction\n outputs = getattr(script, 'output', None).split(\"\\n\")\n assert(outputs)\n for output in outputs:\n saving = obj_save_record.SaveRecordTest()\n args = saving.build_payload(output)\n if args:\n saving = obj_save_record.SaveRecordTest()\n saving.test(*args)\n assert saving.address == args[1]\n assert saving.txid == args[2]\n assert saving.amount == args[3]\n assert saving.source == args[4] == source\n assert saving.index == args[6]","repo_name":"paytaca/watchtower-cash","sub_path":"tests/bch/unittests/test_bitdbquery.py","file_name":"test_bitdbquery.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"4"} +{"seq_id":"31211796619","text":"# Ler vários números em uma lista, criar mais duas uma com os números impares outro com os números pares\n'''\nvalores = list()\nimpar = list()\npar = list()\n\nwhile True:\n valores.append(int(input('Digite um valor: ')))\n\n op = ''\n\n op = str(input('Quer falar outro valor? [S/N] ')).strip().lower()\n if op == 'n':\n break\n\n\nfor c in range(0, len(valores)):\n if valores[c] % 2 != 0:\n impar.append(valores[c])\n else:\n par.append(valores[c])\n\nprint('-=' * 26)\nprint(f'A lista completa é {valores}')\nprint(f'Os valores pares foram {par}')\nprint(f'Os valores impares foram {impar}')\n'''\n#Guanabara fez\n\nnum = list()\npares = list()\nimpares = list()\nwhile True:\n num.append(int(input('Digite um número: ')))\n resp = str(input('Quer continuar? 
[S/N] '))\n if resp in 'Nn':\n break\nfor i, v in enumerate(num):\n if v % 2 == 0:\n pares.append(v)\n else:\n impares.append(v)\nprint('-=' * 30)\nprint(f'A lista completa é {num}')\nprint(f'A lista de pares é {pares}')\nprint(f'A lista de impares é {impares}')\n","repo_name":"dyonathan/CursoemVideo","sub_path":"Mundo 3/PythonExercicios/ex082 lista normal, lista par e lista impar.py","file_name":"ex082 lista normal, lista par e lista impar.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"5891014764","text":"import re\nimport time\nimport datetime\nimport signal\nimport sys\nimport collections\n\nfrom libuno.exception import UvnException\nfrom libuno.yml import YamlSerializer, repr_yml, repr_py\n\nclass Timestamp:\n\n epoch = datetime.datetime.utcfromtimestamp(0)\n\n default_format = \"%Y%m%d-%H%M%S\"\n\n def __init__(self, ts):\n self._ts = ts\n \n def subtract(self, ts):\n self_ts = time.mktime(self._ts)\n if (isinstance(ts, Timestamp)):\n other_ts = time.mktime(ts._ts)\n else:\n other_ts = time.mktime(ts)\n return self_ts - other_ts\n \n def format(self, fmt=None):\n if (fmt is None):\n fmt = Timestamp.default_format\n return time.strftime(fmt, self._ts)\n \n\n def millis(self):\n ts = datetime.datetime.fromtimestamp(time.mktime(self._ts))\n return (ts - Timestamp.epoch).total_seconds() * 1000.0\n \n def __str__(self):\n return self.format()\n \n @staticmethod\n def parse(val, fmt = None):\n if (fmt is None):\n fmt = Timestamp.default_format\n ts = time.strptime(val, fmt)\n return Timestamp(ts)\n\n @staticmethod\n def now():\n return Timestamp(time.gmtime())\n\n @staticmethod\n def unix(t):\n return Timestamp(time.gmtime(int(t)))\n\n\n class _YamlSerializer(YamlSerializer):\n\n def repr_yml(self, py_repr, **kwargs):\n return py_repr.format()\n \n def repr_py(self, yml_repr, **kwargs):\n return Timestamp.parse(yml_repr)\n\nclass Duration:\n\n def __init__(self, tdelta):\n self._tdelta = tdelta\n \n def total_seconds(self):\n return self._tdelta.total_seconds()\n \n class _YamlSerializer(YamlSerializer):\n \n def repr_yml(self, py_repr, **kwargs):\n yml_repr = py_repr.total_seconds()\n return yml_repr\n \n def repr_py(self, yml_repr, **kwargs):\n py_repr = Duration(tdelta = datetime.timedelta(seconds=yml_repr))\n return py_repr\n\nclass ActivityMonitor:\n \"\"\"An object which helps keeping track of the state of another based on its\n \"activity\".\"\"\"\n\n def __init__(self, activity_timeout, last_activity = None):\n self._last_activity = last_activity\n self._activity_timeout = Duration(tdelta = activity_timeout)\n \n def mark_active(self):\n self._last_activity = Timestamp.now()\n \n def is_active(self):\n if self._last_activity is None:\n return False\n now = Timestamp.now()\n diff = now.subtract(self._last_activity)\n return (diff <= self._activity_timeout.total_seconds())\n \n class _YamlSerializer(YamlSerializer):\n def repr_yml(self, py_repr, **kwargs):\n yml_repr = dict()\n if (py_repr._last_activity is not None):\n yml_repr[\"last_activity\"] = repr_yml(\n py_repr._last_activity, **kwargs)\n yml_repr[\"activity_timeout\"] = repr_yml(\n py_repr._activity_timeout, **kwargs)\n yml_repr[\"active\"] = py_repr.is_active()\n return yml_repr\n \n def repr_py(self, yml_repr, **kwargs):\n last_activity = None\n if (\"last_activity\" in yml_repr):\n last_activity = repr_py(Timestamp, \n yml_repr[\"last_activity\"], **kwargs)\n activity_timeout = repr_py(Duration, \n 
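The version above separates the list with an explicit loop (and never actually uses the index from enumerate). The same even/odd split written with list comprehensions:

num = [7, 2, 9, 4, 10]
pares = [v for v in num if v % 2 == 0]
impares = [v for v in num if v % 2 != 0]
print(pares, impares)  # [2, 4, 10] [7, 9]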
yml_repr[\"activity_timeout\"], **kwargs)\n py_repr = ActivityMonitor(\n activity_timeout = activity_timeout,\n last_activity = last_activity)\n return py_repr\n\ndef validate_fqdn(fqdn):\n \"\"\"Checks that provided string is a valid FQDN.\n \n Adapted from:\n https://stackoverflow.com/questions/2532053/validate-a-hostname-string\n\n The function ensures that each segment:\n\n - contains at least one character and a maximum of 63 characters\n - consists only of allowed characters\n - doesn't begin or end with a hyphen.\n\n It also avoids double negatives (not disallowed).\n \"\"\"\n if len(fqdn) > 255:\n return False\n allowed = re.compile(r\"(?!-)[A-Z\\d-]{1,63}(?= self._range.stop:\n raise StopIteration()\n self._cur = self._cur + self._range.incr\n return self._cur\n\ndef humanbytes(B):\n 'Return the given bytes as a human friendly KB, MB, GB, or TB string'\n B = float(B)\n KB = float(1024)\n MB = float(KB ** 2) # 1,048,576\n GB = float(KB ** 3) # 1,073,741,824\n TB = float(KB ** 4) # 1,099,511,627,776\n\n if B < KB:\n return '{0} {1}'.format(B,'Bytes' if 0 == B > 1 else 'Byte')\n elif KB <= B < MB:\n return '{0:.2f} KB'.format(B/KB)\n elif MB <= B < GB:\n return '{0:.2f} MB'.format(B/MB)\n elif GB <= B < TB:\n return '{0:.2f} GB'.format(B/GB)\n elif TB <= B:\n return '{0:.2f} TB'.format(B/TB)\n\n\n\nclass MonitorThread(threading.Thread):\n\n def __init__(self, name, min_wait=0):\n threading.Thread.__init__(self, daemon=True)\n # set thread name\n self.name = name\n self._min_wait = min_wait\n self._queued = False\n self._lock = threading.RLock()\n self._sem_run = threading.Semaphore()\n self._sem_run.acquire()\n self._sem_exit = threading.BoundedSemaphore()\n self._sem_exit.acquire()\n\n def trigger(self):\n with self._lock:\n if self._queued:\n return\n self._queued = True\n self._sem_run.release()\n \n def _do_monitor(self):\n raise NotImplementedError()\n\n def run(self):\n complete = False\n while not (complete or self._exit):\n self._sem_run.acquire()\n\n run = False\n with self._lock:\n run = self._queued\n if run:\n self._queued = False\n if run:\n self._do_monitor()\n\n if self._min_wait:\n complete = self._sem_exit.acquire(timeout=self._min_wait)\n else:\n complete = self._sem_exit.acquire(blocking=False)\n\n def start(self):\n self._exit = False\n threading.Thread.start(self)\n\n def stop(self):\n if not self.is_alive():\n return\n self._exit = True\n self._sem_exit.release()\n self._sem_run.release()\n self.join()\n","repo_name":"mentalsmash/uno","sub_path":"libuno/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":25227,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"4"} +{"seq_id":"70384304758","text":"from einops import rearrange\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom transformers import Wav2Vec2ForPreTraining\n\nclass Wav2Vec2(nn.Module):\n '''Input = (B, T, C) '''\n '''Output = (B, T, C) '''\n def __init__(self, url, zoo):\n super().__init__()\n self.wav2vec2 = self.load_wav2vec2(url, zoo)\n\n def forward(self, x, target_state=-1):\n x = rearrange(x, 'b t 1 -> b t')\n \n if target_state == -1:\n output_hidden_states = False\n else:\n output_hidden_states = True\n x = self.wav2vec2(x, output_hidden_states=output_hidden_states)\n\n if target_state == -1:\n x = x['projected_states']\n else:\n x = x['hidden_states'][target_state]\n\n return x\n\n def load_wav2vec2(self, url, zoo):\n if zoo == 'huggingface':\n teacher = Wav2Vec2ForPreTraining.from_pretrained(url)\n else:\n raise 
NotImplementedError\n\n return teacher","repo_name":"gcambara/speech-commands","sub_path":"src/modules/encoders.py","file_name":"encoders.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"11824888456","text":"# coding: utf-8\n\nfrom marshmallow import Schema, fields\n\nfrom core.models.hoard.xinmi import XMOrder\nfrom core.models.hoard.xinmi.product import XMProduct\nfrom core.models.hoard.xinmi.transaction import (\n subscribe_product as xm_subscribe_product, register_xm_account, pay_order as xm_pay_order)\nfrom core.models.bank import Partner\nfrom core.models.profile.signals import before_deleting_bankcard\nfrom jupiter.views.api.track import events\nfrom jupiter.views.api.v1.profile import inject_bankcard_amount_limit\nfrom jupiter.views.api.v1.savings import XinmiOrderSchema, warning\nfrom .common import obtain_bankcard, obtain_coupon\nfrom core.models.hoard.xinmi.errors import SubscribeProductError as XMSubscribeProductError\nfrom .errors import (\n XMOrderOwnershipError, XMOrderInProcessingError, SmsEmptyError,\n XMOrderNotExistedError, XMProductNotExistedError)\n\n\ndef xm_auth(user_id):\n \"\"\"绑定新米账户.\"\"\"\n\n register_xm_account(user_id)\n\n\ndef purchase(json_data, g):\n \"\"\"选购新米产品, 创建理财单\"\"\"\n\n purchase_schema = XinmiPurchaseSchema(strict=True)\n order_schema = XinmiOrderSchema(strict=True)\n result = purchase_schema.load(json_data)\n\n product = obtain_xm_product(result.data['product_id'])\n coupon = obtain_coupon(result.data.get('coupon_id'), g.user)\n bankcard = obtain_bankcard(result.data['bankcard_id'], g)\n g.bankcard_manager.set_default(bankcard)\n pay_amount = result.data.get('pay_amount', result.data['amount'])\n pocket_deduction_amount = result.data.get('pocket_deduction_amount')\n\n if product.product_type is XMProduct.Type.classic:\n due_date = result.data['due_date']\n else:\n due_date = None\n\n try:\n order = xm_subscribe_product(\n g.user,\n product,\n bankcard,\n result.data['amount'],\n pay_amount,\n due_date,\n coupon=coupon,\n pocket_deduction_amount=pocket_deduction_amount)\n inject_bankcard_amount_limit(Partner.xm, [order.bankcard])\n except XMSubscribeProductError as e:\n bankcard_results = before_deleting_bankcard.send(\n bankcard_id=bankcard.id_, user_id=g.user.id_)\n if any(r for _, r in bankcard_results):\n raise XMSubscribeProductError(u'%s。如需修改银行卡信息,请联系微信客服(plan141)' % e)\n raise XMSubscribeProductError()\n\n return order_schema.dump(order).data\n\n\ndef purchase_verify(order_id, json_data, request):\n \"\"\"提供短信验证码, 支付理财单\"\"\"\n\n purchase_verify_schema = XinmiVerifySchema(strict=True)\n order_schema = XinmiOrderSchema(strict=True)\n result = purchase_verify_schema.load(json_data)\n order = obtain_xm_order(order_id, request)\n # pay_code = result.data['stashed_payment_id']\n sms_code = result.data['sms_code']\n\n if not order.is_owner(request.oauth.user):\n raise XMOrderOwnershipError()\n if order.status in [XMOrder.Status.committed, XMOrder.Status.shelved]:\n raise XMOrderInProcessingError()\n if not sms_code:\n raise SmsEmptyError()\n\n xm_pay_order(order, sms_code)\n\n inject_bankcard_amount_limit(Partner.xm, [order.bankcard])\n\n if order.display_status == u'处理中':\n order._confirm_desc = u'支付成功后第二个工作日'\n else:\n order._confirm_desc = order.start_date.date()\n # FIXME: refine fields in model\n order._due_date = order.due_date.date()\n\n events['savings_success'].send(\n request, user_id=order.user_id, order_id=order.id_, 
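A hedged example of driving the Wav2Vec2 wrapper above. The checkpoint name 'facebook/wav2vec2-base' is an assumption for illustration (the first call downloads its weights), and the layer index is arbitrary:

import torch

model = Wav2Vec2(url='facebook/wav2vec2-base', zoo='huggingface')
waveform = torch.randn(2, 16000, 1)        # (B, T, C=1): one second at 16 kHz
feats = model(waveform)                    # target_state=-1 -> projected_states
hidden = model(waveform, target_state=6)   # or an intermediate hidden state
print(feats.shape, hidden.shape)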
amount=unicode(order.amount),\n period='{0.value}-{0.unit}'.format(order.profit_period))\n\n return order_schema.dump(order).data\n\n\ndef obtain_xm_order(order_id, request):\n order = XMOrder.get(order_id)\n if not order:\n warning('用户访问不存在的订单', order_id=order_id)\n raise XMOrderNotExistedError()\n if not order.is_owner(request.oauth.user):\n warning('用户访问他人的订单', order_id=order_id)\n raise XMOrderOwnershipError()\n return order\n\n\ndef obtain_xm_product(product_id):\n product = XMProduct.get(product_id)\n if not product:\n warning('用户访问不存在的产品', product_id=product_id)\n raise XMProductNotExistedError()\n return product\n\n\nclass XinmiPurchaseSchema(Schema):\n \"\"\"新米购买请求实体.\"\"\"\n\n #: :class:`int` 购买产品 (:class:`.XinMiPurchaseSchema`) 的唯一 ID\n product_id = fields.Integer(required=True)\n #: :class:`~decimal.Decimal` 购买金额 (100 的整数倍)\n amount = fields.Decimal(places=0, required=True)\n #: :class:`~decimal.Decimal` 实际支付金额\n pay_amount = fields.Decimal(places=2)\n #: :class:`in` 使用优惠券ID\n coupon_id = fields.Integer()\n #: :class:`~datetime.date` 到期时间\n due_date = fields.Date(attribute='due_date')\n #: :class:`str` 支付所用银行卡 ID\n bankcard_id = fields.String(required=True)\n #: :class:`~decimal.Decimal` 抵扣金额\n pocket_deduction_amount = fields.Decimal(places=2)\n #: :class:`str` 产品供应商\n vendor = fields.String(required=True)\n\n\nclass XinmiVerifySchema(Schema):\n \"\"\"新米购买支付请求实体.\"\"\"\n\n #: :class:`str` 银行卡预留手机号收到的短信验证码\n sms_code = fields.String(required=True)\n #: :class:`str` 产品供应商\n vendor = fields.String(required=True)\n","repo_name":"c1xfr2e/soledad","sub_path":"jupiter/views/api/v2/products/xinmi.py","file_name":"xinmi.py","file_ext":"py","file_size_in_byte":5403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"135493456","text":"import openpyxl\nimport csv\nimport os, sys\nfrom datetime import datetime\n\nSOURCE_FILE = 'C:/dev/__bite_dev/sc_meta/ex2pg/files/source.xlsx'\nDUMP_DIR = 'C:/dev/__bite_dev/sc_meta/ex2pg/files/dump/'\nDUMP_STATE_FILE = 'C:/dev/__bite_dev/sc_meta/ex2pg/files/dump/.dump.state'\nLOG_PATH = 'C:/dev/__bite_dev/sc_meta/ex2pg/files/save2csv.log'\nEMERGENCY_PATH = 'C:/log'\n\nsource_file = \"\"\ndump_dir = \"\"\ndump_state_file = \"\"\nlog_path = \"\"\n\n# Записать лог\ndef log(level: str, message: str, path=None):\n if path is None:\n path = log_path\n\n time = datetime.now().strftime('%H:%M:%S')\n message = f\"{time} {level}: {message}\\n\"\n\n # в файл\n with open(path, 'a', encoding='utf-8') as f:\n f.write(message)\n\n # в вывод\n print(message)\n\ndef emergency_log(msg: str):\n log(\"ERROR\", f\"Произошла ошибка: {msg}\", EMERGENCY_PATH)\n\n# Сохранить дамп\ndef dump_save(source: str, dump_dir: str):\n DUMP_FILENAME = 'dump'\n \n wb = openpyxl.load_workbook(source)\n sh = wb.active #wb['Sheet1']\n\n \n savetime = datetime.now().strftime('%d.%m.%Y - %H.%M')\n dump = dump_dir + DUMP_FILENAME + '_' + savetime + '.csv'\n\n with open(dump, 'w', newline='', encoding='utf-8') as f:\n f_writer = csv.writer(f)\n for row in sh.rows:\n f_writer.writerow([cell.value for cell in row])\n\n log(\"INFO\", f\"Записан новый файл дампа: {dump}\")\n\n# Очистить дамп\ndef dump_cleanup(dump_dir: str):\n file_list = [file for file in os.listdir(dump_dir)]\n file_list.remove('.dump.state')\n\n if len(file_list) > 30:\n file_list.sort()\n os.remove(dump_dir + file_list[0])\n\n log(\"INFO\", \"Дамп очищен\")\n\n# Обновить .dump.state\ndef dump_update(source_file: str, dump_dir: str, dump_state_file: str):\n # file_state 
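A hedged sketch of how XinmiPurchaseSchema above consumes a request body (the field values are made up). With strict=True, this marshmallow 2.x style load raises on validation errors instead of collecting them next to the data:

payload = {
    'product_id': 1,
    'amount': '1000',       # coerced to Decimal with 0 decimal places
    'bankcard_id': '42',
    'vendor': 'xm',
}
result = XinmiPurchaseSchema(strict=True).load(payload)
print(result.data['amount'])  # Decimal('1000')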
получили \n file_state = datetime.fromtimestamp(\n os.path.getmtime(source_file)\n ).strftime('%Y.%m.%d - %H.%M')\n \n # сравнили\n with open(dump_state_file, 'r', encoding='utf-8') as f:\n last_state = f.read()\n\n # если не равны\n if last_state is None or file_state != last_state:\n # 1.сохранили dump\n dump_save(source_file, dump_dir)\n\n # 2.записали новый last_state\n with open(dump_state_file, 'w', encoding='utf-8') as f:\n f.write(file_state)\n else:\n log(\"INFO\", \"Отслеживаемый Excel-файл не обновлялся\")\n\n # исполнить dump policy (очистить дамп)\n dump_cleanup(dump_dir)\n\n# Создать директории для файлов утилиты\ndef dump_prep(source, work_dir):\n global source_file\n global dump_dir\n global dump_state_file\n global log_path\n\n if source == \"\" and work_dir == \"\":\n source_file = SOURCE_FILE\n dump_dir = DUMP_DIR\n dump_state_file = DUMP_STATE_FILE\n log_path = LOG_PATH\n log(\"INFO\", f\"РЕЖИМ ОТЛАДКИ\")\n else:\n if not os.path.exists(source):\n raise FileNotFoundError(\"Указан неверный путь к отслеживаемому файлу (\\\"source\\\").\")\n \n source_file = source\n dump_dir = work_dir + \"/dump/\"\n dump_state_file = work_dir + \"/dump/.dump.state\"\n log_path = work_dir + \"/save2csv.log\"\n\n if not os.path.exists(work_dir):\n os.mkdir(work_dir)\n os.mkdir(dump_dir)\n open(dump_state_file, 'a').close()\n open(log_path, 'a').close()\n log(\"INFO\", f\"Создана директория для файлов утилиты: {work_dir}\")\n\ndef main(source, work_dir): \n dump_prep(source, work_dir)\n dump_update(source_file, dump_dir, dump_state_file)\n\n\nif __name__ == '__main__':\n try:\n main(sys.argv[1], sys.argv[2])\n except Exception as ex:\n emergency_log(str(ex))\n","repo_name":"TrueCookie/dump_ex2csv","sub_path":"save2csv.py","file_name":"save2csv.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"25424796929","text":"from .objects import ModbusObject\nfrom typing import List\n\n\nclass ModbusObjectValidation(Exception):\n pass\n\ndef all_subclasses(cls):\n return set(cls.__subclasses__()).union(\n [s for c in cls.__subclasses__() for s in all_subclasses(c)])\n\ndef get_modbus_object(modbus_number: int, value_to_write: int | bool | None = None) -> ModbusObject:\n for object_class in all_subclasses(ModbusObject):\n try:\n modbus_number = int(modbus_number)\n except ValueError:\n modbus_number = modbus_number\n modbus_number_ok = modbus_number in object_class.NUMBER_RANGE_FAST\n\n if modbus_number_ok:\n return object_class(modbus_number, value_to_write)\n \n raise ModbusObjectValidation(f'provided number {modbus_number} is not valid Modbus object')\n\ndef get_modbus_object_from_range(number_range: str) -> List[ModbusObject]:\n \"\"\"\n \"\"\"\n\n first_object_number = number_range.split('-')[0]\n last_object_number = number_range.split('-')[1]\n\n try:\n first_object_number = int(first_object_number)\n last_object_number = int(last_object_number)\n except ValueError:\n first_object_number = first_object_number\n last_object_number = last_object_number\n\n first_object = get_modbus_object(first_object_number)\n all_numbers = list(first_object.NUMBER_RANGE)\n index_of_first_obj = all_numbers.index(first_object_number)\n index_of_last_obj = all_numbers.index(last_object_number)\n\n objects_in_range = []\n\n for number in range(index_of_first_obj, index_of_last_obj+1):\n objects_in_range.append(\n get_modbus_object(all_numbers[number])\n )\n\n return 
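The dump_update flow above reduces to one question: has the tracked file's mtime changed since the recorded state? The comparison, isolated into a standalone helper (the function name is an illustration):

import os
from datetime import datetime

def file_changed(source_file: str, state_file: str) -> bool:
    file_state = datetime.fromtimestamp(
        os.path.getmtime(source_file)
    ).strftime('%Y.%m.%d - %H.%M')
    with open(state_file, 'r', encoding='utf-8') as f:
        last_state = f.read()
    return file_state != last_state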
objects_in_range","repo_name":"pbubas/modbus_wrapper","sub_path":"modbus_wrapper/object_factory.py","file_name":"object_factory.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"17614443800","text":"import json\nimport os\n\nbase = os.path.dirname(__file__)\nimport configparser\nfrom utils.vocab import Vocab\n\nconfig = configparser.ConfigParser()\nconfig.read(base + '/../config/conf.cf')\n\n\ndef parse_vocab(read_filepaths, write_filepaths, json_to_string_accessors, char=None, lower=None,\n include_special=None, min_cutoffs=None, top_n=None):\n \"\"\"\n Parses all the items in the filepaths to vocabs and interpretations. Expects each line in the files in the filepath\n to be in JSON format. Extracts the strings to parse from the json according to the json_to_string_accessors.\n By default parses by character.\n \n :param read_filepaths: Filepaths to search for files to parse vocab from\n :param write_filepaths: Filepaths to write vocab to\n :param json_to_string_accessors: Accessors for the strings to parse from the JSONs\n :param char: Whether to parse by character. Default is True\n :param lower: Whether to lowercase the vocab. Default is True\n :param include_special: Whether to include special characters. Default is True\n :param min_cutoffs: The minimum cutoff for the vocabulary to parse. Default is none.\n :return: None\n \"\"\"\n assert len(write_filepaths) == len(\n json_to_string_accessors), 'Number of vocabulary items is not the same as number of json accessors'\n\n char = char or [False for _ in range(len(write_filepaths))]\n lower = lower or [True for _ in range(len(write_filepaths))]\n include_special = include_special or [False for _ in range(len(write_filepaths))]\n min_cutoffs = min_cutoffs or [0 for _ in range(len(write_filepaths))]\n top_n = top_n or 1000000000\n\n vocabs = [Vocab(filepath=write_filepaths[i],\n char=char[i],\n lower=lower[i],\n include_special=include_special[i],\n cutoff=min_cutoffs[i],\n accessor=json_to_string_accessors[i],\n top_n=top_n)\n for i in range(len(write_filepaths))]\n\n for read_filepath in read_filepaths:\n read_file = open(read_filepath, 'r')\n json_str = read_file.readline()\n print(read_filepath)\n while json_str:\n json_obj = json.loads(json_str)\n for vocab in vocabs:\n vocab._extract_from_json_obj(json_obj)\n json_str = read_file.readline()\n for vocab in vocabs:\n vocab.finalize()\n vocab.save()\n","repo_name":"ultraeric/TwitterBot","sub_path":"utils/parse_utils.py","file_name":"parse_utils.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"9221737801","text":"\"\"\"This file contains all the classes you must complete for this project.\n\nYou can use the test cases in agent_test.py to help during development, and\naugment the test suite with your own test cases to further test your code.\n\nYou must test your agent's strength against a set of agents with known\nrelative strength using tournament.py and include the results in your report.\n\"\"\"\nimport random\nimport math\n\n\nclass Timeout(Exception):\n \"\"\"Subclass base exception for code clarity.\"\"\"\n pass\n\n\"\"\"Check the length of the intersection between legal moves for player and opponent.\nRemove the result from the player number of legal moves\"\"\"\ndef heuristic4(game,player, opponent):\n opp_moves_set = set(game.get_legal_moves(opponent))\n my_moves_set = 
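all_subclasses above recurses so that indirect subclasses of ModbusObject are found too, unlike a bare cls.__subclasses__() call. A quick check with a hypothetical three-level hierarchy:

class Base: pass
class A(Base): pass
class B(Base): pass
class A1(A): pass           # grandchild of Base

print(Base.__subclasses__())   # [A, B] — misses A1
print(all_subclasses(Base))    # {A, B, A1} — includes grandchildren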
set(game.get_legal_moves(player))\n l_inter = len(opp_moves_set.intersection(my_moves_set))\n my_len = len(my_moves_set)\n score = (my_len - l_inter)\n return score\n \n\"\"\"Heuristic 2 borrow from Statistics, more precisely, the F Statistics. The F statistics measure differences between \"clusters\" or groups. It is the ratio of the mean variance between centroids ( means of each group) and the mean of the variances within groups\"\"\"\ndef heuristic2(game,player,opponent):\n my_moves = game.get_legal_moves(player)\n my_len = len(my_moves)\n opp_moves = game.get_legal_moves(opponent)\n opp_len = len(opp_moves)\n my_location = game.get_player_location(player)\n opponent_location = game.get_player_location(opponent)\n mean_location = [(my_location[0]+opponent_location[0])/2, (my_location[1]+opponent_location[1])/2]\n sb = (my_location[0] - mean_location[0])*(my_location[0] - mean_location[0])+(my_location[1] - mean_location[1])*(my_location[1] - mean_location[1])+(opponent_location[0] - mean_location[0])*(opponent_location[0] - mean_location[0])+(opponent_location[1] - mean_location[1])*(opponent_location[1] - mean_location[1])\n if (my_len+opp_len-2) > 0:\n sw = 5 * (my_len+opp_len)/(my_len+opp_len-2) \n score = sb/sw\n else:\n score = math.sqrt((my_location[0]-opponent_location[0])**2+(my_location[1]-opponent_location[1])**2)\n return score\n\"\"\"Check how many legal moves player and opponent shares. As the maximum of number of shared legal moves can be at most 2, I\ndecided to set score to 12 if the number of shared legal moves is 2, 10 if shared legal moves is 10 and the number of legal moves for player for all other instances\"\"\"\ndef heuristic3(game,player, opponent):\n my_moves_set = set(game.get_legal_moves(player))\n opp_moves_set = set(game.get_legal_moves(opponent))\n l_inter = len(opp_moves_set.intersection(my_moves_set))\n my_len = len(my_moves_set)\n opp_len = len(opp_moves_set)\n if l_inter == 2:\n score = 12\n elif l_inter == 1:\n score = 10\n else:\n score = my_len\n return score\ndef heuristic1(game,player, opponent):\n weighted_positions = { (0,0): 2, (0,1):3, (0,2): 4,(0,3):4,(0,4): 4,(0,5):3,(0,6):2,\n (1,0): 3, (1,1):4, (1,2): 6,(1,3):6,(1,4): 6,(1,5):4,(1,6):3,\n (2,0): 4, (2,1):6, (2,2): 8,(2,3):8,(2,4): 8,(2,5):6,(2,6):4,\n (3,0): 4, (3,1):6, (3,2): 8,(3,3):8,(3,4): 8,(3,5):6,(3,6):4,\n (4,0): 4, (4,1):6, (4,2): 8,(4,3):8,(4,4): 8,(4,5):6,(4,6):4,\n (5,0): 3, (5,1):4, (5,2): 6,(5,3):6,(5,4): 6,(5,5):4,(5,6):3,\n (6,0): 2, (6,1):3, (6,2): 4,(6,3):4,(6,4): 4,(6,5):3,(6,6):2,\n }\n weighted_positions2 = {(0,0): 4, (0,1):8, (0,2): 16, (0,3):16, (0,4):16,(0,5):8,(0,6):4,\n (1,0): 8, (1,1):16, (1,2): 12,(1,3):12,(1,4): 12,(1,5):16,(1,6):8,\n (2,0): 16, (2,1):12, (2,2): 9,(2,3):9,(2,4): 9,(2,5):12,(2,6):16,\n (3,0): 16, (3,1):12, (3,2): 9,(3,3):9,(3,4): 9,(3,5):12,(3,6):16,\n (4,0): 16, (4,1):12, (4,2): 9,(4,3):9,(4,4): 9,(4,5):12,(4,6):16,\n (5,0): 8, (5,1):16, (5,2): 12,(5,3):12,(5,4): 12,(5,5):16,(5,6):8,\n (6,0): 4, (6,1):8, (6,2): 16,(6,3):16,(6,4): 16,(6,5):8,(6,6):4,\n }\n \n my_location = game.get_player_location(player)\n opponent_location = game.get_player_location(opponent)\n #score = 20 - weighted_positions[my_location]\n my_moves = game.get_legal_moves(player)\n my_len = len(my_moves)\n if my_len > 0:\n score = (10-weighted_positions[my_location])/ my_len\n else: \n score = (10-weighted_positions[my_location])\n return score\ndef custom_score(game, player):\n \"\"\"Calculate the heuristic value of a game state from the point of view\n of the given 
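heuristic4 and heuristic3 above both score positions through the overlap of the two players' move sets. The set arithmetic behind them, illustrated on hand-made move lists (the coordinates are arbitrary):

my_moves = {(2, 3), (3, 4), (5, 2)}
opp_moves = {(3, 4), (5, 2), (6, 1)}
shared = len(my_moves & opp_moves)   # 2 moves both players could take next
print(len(my_moves) - shared)        # heuristic4-style score: 3 - 2 = 1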
player.\n\n Note: this function should be called from within a Player instance as\n `self.score()` -- you should not need to call this function directly.\n\n Parameters\n ----------\n game : `isolation.Board`\n An instance of `isolation.Board` encoding the current state of the\n game (e.g., player locations and blocked cells).\n\n player : object\n A player instance in the current game (i.e., an object corresponding to\n one of the player objects `game.__player_1__` or `game.__player_2__`.)\n\n Returns\n -------\n float\n The heuristic value of the current game state to the specified player.\n \"\"\"\n # TODO: finish this function!\n \"\"\" For the score, I know that at most a player has 8 potential legal moves at any them; therefore,\n I decided to use get_legal_moves to find all possible moves, and then count them.\n A location is a strong as the number of possible exits it has, which is similar to the isolation problem worked on in the lecture. \n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n if len(game.get_legal_moves(player)) == 0:\n return float(\"-inf\")\n opponent = game.get_opponent(player)\n score = heuristic1(game,player, opponent)\n \n return float(score )\n\n raise NotImplementedError\n \n\nclass CustomPlayer:\n \"\"\"Game-playing agent that chooses a move using your evaluation function\n and a depth-limited minimax algorithm with alpha-beta pruning. You must\n finish and test this player to make sure it properly uses minimax and\n alpha-beta to return a good move before the search time limit expires.\n\n Parameters\n ----------\n search_depth : int (optional)\n A strictly positive integer (i.e., 1, 2, 3,...) for the number of\n layers in the game tree to explore for fixed-depth search. (i.e., a\n depth of one (1) would only explore the immediate sucessors of the\n current state.)\n\n score_fn : callable (optional)\n A function to use for heuristic evaluation of game states.\n\n iterative : boolean (optional)\n Flag indicating whether to perform fixed-depth search (False) or\n iterative deepening search (True).\n\n method : {'minimax', 'alphabeta'} (optional)\n The name of the search method to use in get_move().\n\n timeout : float (optional)\n Time remaining (in milliseconds) when search is aborted. Should be a\n positive value large enough to allow the function to return before the\n timer expires.\n \"\"\"\n\n def __init__(self, search_depth=8, score_fn=custom_score,\n iterative=True, method='minimax', timeout=10.):\n self.search_depth = search_depth\n self.iterative = iterative\n self.score = score_fn\n self.method = method\n self.time_left = None\n self.TIMER_THRESHOLD = timeout\n\n def get_move(self, game, legal_moves, time_left):\n \"\"\"Search for the best move from the available legal moves and return a\n result before the time limit expires.\n\n This function must perform iterative deepening if self.iterative=True,\n and it must use the search method (minimax or alphabeta) corresponding\n to the self.method value.\n\n **********************************************************************\n NOTE: If time_left < 0 when this function returns, the agent will\n forfeit the game due to timeout. 
You must return _before_ the\n timer reaches 0.\n **********************************************************************\n\n Parameters\n ----------\n game : `isolation.Board`\n An instance of `isolation.Board` encoding the current state of the\n game (e.g., player locations and blocked cells).\n\n legal_moves : list<(int, int)>\n A list containing legal moves. Moves are encoded as tuples of pairs\n of ints defining the next (row, col) for the agent to occupy.\n\n time_left : callable\n A function that returns the number of milliseconds left in the\n current turn. Returning with any less than 0 ms remaining forfeits\n the game.\n\n Returns\n -------\n (int, int)\n Board coordinates corresponding to a legal move; may return\n (-1, -1) if there are no available legal moves.\n \"\"\"\n\n self.time_left = time_left\n\n # TODO: finish this function!\n\n # Perform any required initializations, including selecting an initial\n # move from the game board (i.e., an opening book), or returning\n # immediately if there are no legal moves\n check_legal_move = game.get_legal_moves()\n number_legal_moves = len(check_legal_move)\n #Stating that a game is a losing game is equivalent to stating that the player has no legal \n # moves. Still, I check for both conditions\n if game.is_loser(self):\n return (-1,-1)\n if number_legal_moves == 0:\n return (-1, -1)\n score = float(\"-inf\")\n position = (-1, -1)\n depth = 0\n try:\n # The search method call (alpha beta or minimax) should happen in\n # here in order to avoid timeout. The try/except block will\n # automatically catch the exception raised by the search method\n # when the timer gets close to expiring\n \"\"\"As we need to loop until we find a winning position or we get to timeout. \n I have decided to create an infinity loop when iterative is true.\n As minimax and alphabeta return best position and score, given a starting position, \n it doesn't make sense to go through the legal moves within the iterative loop. 
\n Furthermore, as we are interested only on winning position or timeout, \n it makes sense to return position when score is equal to \"inf\", which \n means a winning position.\n If time runs out, a position has to be returned.\n \"\"\"\n while self.iterative == True:\n depth += 1\n if self.method == \"minimax\":\n score, position = self.minimax(game, depth,True) \n elif self.method == \"alphabeta\":\n score, position = self.alphabeta(game, depth)\n if score == float(\"inf\"):\n return position \n if not self.iterative:\n depth = self.search_depth\n if self.method == \"minimax\":\n score, position = self.minimax(game, depth,True) \n elif self.method == \"alphabeta\":\n score, position = self.alphabeta(game, depth)\n pass \n except Timeout:\n return position\n # Handle any actions required at timeout, if necessary\n pass\n\n # Return the best move from the last completed search iteration\n return position\n raise NotImplementedError\n \n \n \"\"\"Implement the minimax search algorithm as described in the lectures.\n\n Parameters\n ----------\n game : isolation.Board\n An instance of the Isolation game `Board` class representing the\n current game state\n\n depth : int\n Depth is an integer representing the maximum number of plies to\n search in the game tree before aborting\n\n maximizing_player : bool\n Flag indicating whether the current search depth corresponds to a\n maximizing layer (True) or a minimizing layer (False)\n\n Returns\n -------\n float\n The score for the current search branch\n\n tuple(int, int)\n The best move for the current branch; (-1, -1) for no legal moves\n\n Notes\n -----\n (1) You MUST use the `self.score()` method for board evaluation\n to pass the project unit tests; you cannot call any other\n evaluation function directly.\n \"\"\"\n\n def minimax(self, game, depth ,maximizing_player=True ):\n if self.time_left() < self.TIMER_THRESHOLD:\n raise Timeout()\n check_legal_move = game.get_legal_moves()\n if game.is_loser(self):\n bestValue = float('-inf')\n current_location = (-1,-1)\n return bestValue,current_location\n if game.is_winner(self):\n bestValue = float('inf')\n current_location = game.get_player_location(self)\n return bestValue,current_location\n '''if len (check_legal_move) == 0:\n bestValue = custom_score(game,self)\n current_location = game.get_player_location(self)\n return bestValue,current_location'''\n if depth == 0:\n bestValue = self.score(game,self)\n return bestValue,(-1,-1)\n if maximizing_player:\n bestValue = float('-inf')\n current_location = game.get_player_location(self)\n for v in check_legal_move:\n v_game = game.forecast_move(v)\n value, location = self.minimax(v_game,depth-1,False)\n if value > bestValue :\n bestValue = value\n current_location = v\n else:\n bestValue = float('inf')\n current_location = game.get_player_location(self)\n for v in check_legal_move: \n v_game = game.forecast_move(v)\n value, location = self.minimax(v_game,depth-1,True)\n if value < bestValue :\n bestValue = value\n current_location = v\n return bestValue, current_location \n \n \n \n def alphabeta(self, game, depth, alpha=float(\"-inf\"), beta=float(\"inf\"), maximizing_player=True):\n \"\"\"Implement minimax search with alpha-beta pruning as described in the\n lectures.\n\n Parameters\n ----------\n game : isolation.Board\n An instance of the Isolation game `Board` class representing the\n current game state\n\n depth : int\n Depth is an integer representing the maximum number of plies to\n search in the game tree before aborting\n\n alpha : float\n 
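The get_move loop above implements iterative deepening: search one ply deeper on each pass until a proven win (score == inf) or until the search raises Timeout near the time limit. The pattern, distilled into a standalone helper — here search stands for any (game, depth) -> (score, move) callable such as self.minimax or self.alphabeta:

def iterative_deepening(search, game):
    best_move = (-1, -1)
    depth = 0
    try:
        while True:
            depth += 1
            score, best_move = search(game, depth)
            if score == float('inf'):   # proven win, no need to go deeper
                return best_move
    except Timeout:
        # keep the best move from the last fully completed depth
        return best_move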
Alpha limits the lower bound of search on minimizing layers\n\n beta : float\n Beta limits the upper bound of search on maximizing layers\n\n maximizing_player : bool\n Flag indicating whether the current search depth corresponds to a\n maximizing layer (True) or a minimizing layer (False)\n\n Returns\n -------\n float\n The score for the current search branch\n\n tuple(int, int)\n The best move for the current branch; (-1, -1) for no legal moves\n\n Notes\n -----\n (1) You MUST use the `self.score()` method for board evaluation\n to pass the project unit tests; you cannot call any other\n evaluation function directly.\n \"\"\"\n if self.time_left() < self.TIMER_THRESHOLD:\n raise Timeout()\n\n # TODO: finish this function!\n \n #print(\"current location: \", game.get_player_location(self))\n check_legal_move = game.get_legal_moves()\n if game.is_loser(self):\n bestValue = float('-inf')\n current_location = (-1,-1)\n return bestValue,current_location\n if game.is_winner(self):\n bestValue = float('inf')\n current_location = game.get_player_location(self)\n return bestValue,current_location\n # This is not needed, as if there are no legal moves it means game.is_loser \n '''if len (check_legal_move) == 0:\n bestValue = custom_score(game,self)\n current_location = game.get_player_location(self)\n #print(\"LEAF\",game.get_player_location(self),self.score(game,self))\n return bestValue,current_location'''\n if depth == 0:\n #print(\"REACHED DEPTH \",game.get_player_location(self))\n bestValue = self.score(game,self)\n #print(\"Value \", bestValue)\n return bestValue,(-1,-1)\n if maximizing_player:\n #print(\"Max\")\n bestValue = float('-inf')\n current_location = game.get_player_location(self)\n for v in check_legal_move:\n v_game = game.forecast_move(v)\n value, _ = self.alphabeta(v_game,depth-1,alpha, beta,False)\n #print(\"Value at max \", value, \"location \",current_location,\" V value \",v)\n if value > bestValue :\n bestValue = value\n current_location = v\n #alpha = max(alpha, bestValue)\n if beta <= bestValue:\n return bestValue, current_location\n alpha = max(alpha, bestValue)\n #print(\"Best: \",bestValue,\" Current_location: \",current_location)\n return bestValue, current_location\n else:\n #print(\"Min\")\n bestValue = float('inf')\n current_location = game.get_player_location(self)\n for v in check_legal_move: \n v_game = game.forecast_move(v)\n value, _ = self.alphabeta(v_game,depth-1,alpha, beta, True)\n if value < bestValue :\n bestValue = value\n current_location = v \n #print(\"Best: \",bestValue,\" Current_location: \",current_location)\n if bestValue <= alpha:\n return bestValue, current_location \n beta = min(beta,bestValue)\n return bestValue, current_location\n #raise NotImplementedError\n ","repo_name":"DalilaR/AIND-Isolation","sub_path":"game_agent.py","file_name":"game_agent.py","file_ext":"py","file_size_in_byte":18745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"16479468243","text":"from entorno.simbolo import Simbolo\n\nclass Funcion():\n def __init__(self, id, Lsent, tipo, linea):\n self.id = id\n self.Lsent = Lsent\n self.tipo = tipo\n self.linea = linea\n\n def reconcerFunciones(self, ent):\n simbolo = ent.get(self.id)\n if simbolo == None:\n nsimbolo = Simbolo(self.id, self.tipo, self)\n ent.add(self.id, nsimbolo)\n\n def ejecutar(self, ent):\n simbolo = ent.get(self.id)\n if simbolo == None:\n print(\"la funcion no se encontro\")\n else:\n 
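The cutoff tests in the alphabeta method above (stop a max layer once its best reaches beta, a min layer once its best falls to alpha) are easiest to see on a tiny hard-coded tree. A self-contained toy version where leaves are plain numbers:

def toy_alphabeta(node, alpha, beta, maximizing):
    if not isinstance(node, list):           # leaf: static evaluation
        return node
    if maximizing:
        best = float('-inf')
        for child in node:
            best = max(best, toy_alphabeta(child, alpha, beta, False))
            if best >= beta:
                break                        # beta cutoff
            alpha = max(alpha, best)
        return best
    best = float('inf')
    for child in node:
        best = min(best, toy_alphabeta(child, alpha, beta, True))
        if best <= alpha:
            break                            # alpha cutoff
        beta = min(beta, best)
    return best

# root is a max node; once the second branch yields 2 <= alpha == 3,
# the 99 leaf is pruned without ever being evaluated
print(toy_alphabeta([[3, 12], [2, 99]], float('-inf'), float('inf'), True))  # 3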
self.Lsent.ejecutar(ent)","repo_name":"Losajhonny/OLC2A_Compilador_basico","sub_path":"ast/instruccion/Funcion.py","file_name":"Funcion.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"21118079945","text":"\"\"\"\nFaça um algoritmo para ler: quantidade atual em estoque, quantidade \nmáxima em estoque e quantidade mínima em estoque de um produto. \nCalcular e escrever a quantidade média \n((quantidade média = quantidade máxima + quantidade mínima)/2). \nSe a quantidade em estoque for maior ou igual a quantidade média \nescrever a mensagem 'NÃO EFETUAR COMPRA', \nsenão escrever a mensagem 'EFETUAR COMPRA'.\n\"\"\"\n\n\n# Função principal\ndef main():\n # Variaveis\n atual = int()\n maxima = int()\n minima = int()\n media = int()\n\n # Entrada de dados\n atual = int(input())\n maxima = int(input())\n minima = int(input())\n\n # Procesamento\n media = (maxima + minima) / 2\n\n # Saida de dados\n if atual >= media:\n print(\"NÃO EFETUAR COMPRA\")\n else:\n print('EFETUAR COMPRA')\n\n return 0\n\nif __name__ == \"__main__\":\n main()","repo_name":"SimpleR1ick/prog-1","sub_path":"condicionais/lvp_cond_19.py","file_name":"lvp_cond_19.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"12955557480","text":"import openai\nimport os\n#from translating_file import get_percent\nimport tiktoken\nfrom textwrap import wrap\n\n\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\ndef num_tokens_from_string(string: str) -> int:\n \"\"\"Returns the number of tokens in a text string.\"\"\"\n encoding = tiktoken.encoding_for_model(\"gpt-3.5-turbo\")\n num_tokens = len(encoding.encode(string))\n return num_tokens\n\n#this split won't cut any words in half, so it we can still translate\ndef split_string(str,num):\n return wrap(str, len(str)//num)\n\n#concatenates all string in an arr into one string\ndef concatenate_arrstr(arr):\n return ' '.join(arr)\n\ndef percent(count,length):\n per = (int)((count/length)*100)\n return per\n\n#[\"hello im ian im having a bad day\"]\ndef token_valid(arr):\n count = 3\n texts = arr\n if num_tokens_from_string(arr[0]) >= 500: #change 500 to 1780\n texts = split_string(arr[0],count)\n while(num_tokens_from_string(str(max(texts,key=len)))>500):\n count+=2\n texts = split_string(concatenate_arrstr(texts),count-1)\n return texts\n\n\ndef translate(from_lang, to_lang, filename):\n from app.translating_file import get_percent\n translates = []\n count = 1\n file_path = os.path.join(\"downloaded_files\", filename)\n with open(file_path) as new_file: #need import os to change path to get file\n str = new_file.read()\n texts = token_valid([str])\n length = len(texts)\n\n history = [\n {\"role\": \"system\", \"content\": \"You are a translator. The text might not make sense because it's cut from a larger text\"},\n ]\n\n if from_lang == \"detected_language\":\n for i in texts:\n prompt = f\"Translate: [{i}] to {to_lang}. Only include the translated text. Don't include brackets\"\n history.append({\"role\": \"user\", \"content\": prompt})\n response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=history,\n temperature=0.5,\n )\n text = response['choices'][0]['message']['content']\n get_percent(percent(count,length))\n count+=1\n translates.append(text)\n return concatenate_arrstr(translates)\n else:\n for i in texts:\n prompt = f\"Translate: [{i}] to {to_lang}. 
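token_valid above keeps re-splitting the text until the longest chunk fits under the 500-token budget. The same grow-the-split-count loop, reduced to a few lines (the sample text is made up; tiktoken fetches the encoding on first use):

import tiktoken
from textwrap import wrap

enc = tiktoken.encoding_for_model('gpt-3.5-turbo')
text = 'word ' * 2000                       # roughly 2000 tokens
parts = [text]
n = 1
while max(len(enc.encode(p)) for p in parts) >= 500:
    n += 2                                  # step the count like the original
    parts = wrap(text, len(text) // n)      # wrap never cuts words in half
print(len(parts), max(len(enc.encode(p)) for p in parts))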
Only include the translated text. Don't include brackets\"\n history.append({\"role\": \"user\", \"content\": prompt})\n response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=history,\n temperature=0.5,\n )\n text = response['choices'][0]['message']['content']\n get_percent(percent(count,length))\n count+=1\n translates.append(text)\n return concatenate_arrstr(translates) #need to chagne dictionary value in translating_file not return sth\n\n\n","repo_name":"IanTsai1/Translation-Website","sub_path":"backend/translate_txtfile.py","file_name":"translate_txtfile.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"8506778994","text":"import asyncio\nimport logging\nimport requests\nimport yaml\nfrom aiogram import Bot, Dispatcher, types, filters, F\nfrom aiogram.filters.command import Command\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.INFO)\n\nwith open('config.yaml', 'r') as config_file:\n config_data = yaml.safe_load(config_file)\n\n# bot config\nbot_config = config_data['bot']\nbotName = bot_config['botName']\nbotToken = bot_config['botToken']\nadminId = bot_config['adminId']\n\n# api config\napi_config = config_data['api']\nheaders = {\n 'x-api-key': api_config['x-api-key']\n}\napi_url = api_config['api_url']\nGET_GOODS_BY_ID = \"GetGoodsById\"\nwelcome_message = api_config['wellcome_message']\n\n\nbot = Bot(token=botToken)\ndp = Dispatcher()\n\n@dp.message(Command(\"start\"))\nasync def cmd_start(message: types.Message):\n await message.answer(\"Ти мені артикул, я тобі залишки!\")\n \n@dp.message()\nasync def echo_message(message: types.Message):\n text = message.text\n await message.reply(\"Шукаю інфу по айді: \" + text)\n print(message.from_user.id,\":\" , text)\n goods = get_goods_by_id(text)\n if goods == \"Wrong request\":\n message_text = \"No information\"\n else:\n message_text = \"Ось що є:\" + \"\\n\" + goods['Data'][0]['CategoryName'] + \"\\n\" + goods['Data'][0]['Name'] + \"\\n\" + str(goods['Data'][0]['PriceBaseWithTax']) + \" грн.\\n\" + str(goods['Data'][0]['RestsMainWhQuantity']) + \" шт.\"\n await message.reply(message_text)\n \ndef isId(text):\n try:\n int(text)\n return True\n except ValueError:\n return False \n\ndef get_goods_by_id(id):\n if not isId(id):\n return \"Wrong request\"\n\n payload = [id]\n url = api_url + GET_GOODS_BY_ID\n logging.info(\"Payload: \" + payload.__str__() )\n logging.info(\"Url: \" + url)\n\n response = requests.post(url, json=payload, headers=headers)\n if response.status_code == 200:\n # The request was successful\n data = response.json()\n logging.info(data)\n return data\n else:\n logging.error(f\"Request failed with status code {response.status_code}\") \n return \"Wrong request\" \n\nasync def sendWellcomeMessage():\n try:\n logging.info(\"Sending [\" + welcome_message + \"]\")\n await bot.send_message(adminId, welcome_message)\n except Exception as e:\n logging.error(f\"Error sending welcome message: {str(e)}\")\n pass\n\nasync def main():\n logging.info(\"Starting bot\")\n await dp.start_polling(bot)\n \nif __name__ == \"__main__\":\n loop = asyncio.get_event_loop()\n logging.info(\"Starting main loop\")\n loop.create_task(main())\n loop.create_task(sendWellcomeMessage())\n 
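get_goods_by_id above never sets a timeout, so a stalled API call would hang the handler. The same request pattern with a timeout added, factored into a helper (the function name is an illustration):

import logging
import requests

def post_json(url, payload, headers, timeout=10):
    response = requests.post(url, json=payload, headers=headers, timeout=timeout)
    if response.status_code == 200:
        return response.json()
    logging.error(f'Request failed with status code {response.status_code}')
    return None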
loop.run_forever()\n\n","repo_name":"dp-ua/python-Experiments","sub_path":"apibot_bot.py","file_name":"apibot_bot.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"25619630945","text":"import random\n\nsifre=\"\"\nkarakterler=\"ashdgqydıuqwu81623812oıkb654\"\nfor index in range(random.randint(8, 15)): #karakter havuzundan rastgele karakterleri seçip rastgele uzunlukta şifre oluşturuyor\n sifre+=random.choice(karakterler)\n\nprint(sifre)\ndef artikyil():\n if yil%4==0:\n if yil%100==0:\n if yil%400==0:\n return True\n return False\n else:\n return True\n return False\n\n\nfor yil in range(2020 , 1900 ,-1):\n if artikyil():\n print(f\"{yil} yılı artık yıldır\")\n\n\n\n\n\n\nartikyil()\n\n\n","repo_name":"Sametcelikk/Python_tutorial_codes","sub_path":"PYTHON BÖLÜM 1/kapsama alanı random.py","file_name":"kapsama alanı random.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"70025549556","text":"import threading\nimport json \nimport numpy as np\nimport base64\n\nimport pika\n\nfrom draxecdh import crypto\n\nfrom ..keystore import Keystore\n\nclass ReceiverService (threading.Thread):\n \n def __init__(self, channel, projectId, topic, keystore: Keystore, listeners = [], decryptionEnable=True):\n threading.Thread.__init__(self)\n self.listeners = listeners\n self.topic = topic\n self.channel = channel\n self.projectId = projectId\n self.ks = keystore\n self.decryptionEnable = decryptionEnable\n \n def _decrypt(self, body):\n body_json = json.loads(body)\n if body_json['cryptographyDisabled'] == False:\n confBase64 = body_json['configuration']\n confBytes = confBase64.encode('ascii')\n signedData = np.frombuffer(base64.b64decode(confBytes), dtype=np.uint8)\n privateKey = self.ks.getPrivateKey(body_json['nodeId'])\n publicKey = self.ks.draxPublicKey\n privateKey_uint8 = np.frombuffer(bytearray.fromhex(privateKey), dtype=np.uint8)\n publicKey_uint8 = np.frombuffer(bytearray.fromhex(publicKey), dtype=np.uint8)\n unsigned_data = crypto.crypto_unsign(privateKey_uint8, publicKey_uint8, signedData)\n unsigned_data_str = \"\".join(map(chr, unsigned_data))\n body_json['configuration'] = unsigned_data_str\n return body_json\n\n def run(self):\n # set the exchange, if not set before\n self.channel.exchange_declare(exchange=\"amq.topic\", exchange_type='topic', durable=True)\n \n # for each listener, set the queue, bind the queue to the exchange \n # set the callback function, set basic consume and start consuming\n ret = self.channel.queue_declare('', exclusive=True)\n \n queue_name = ret.method.queue\n \n bindingKey = self.projectId + '.' + self.topic.replace(\"/\", \".\")\n self.channel.queue_bind(exchange='amq.topic', queue=queue_name, routing_key=bindingKey)\n \n def callback(ch, method, properties, body):\n if(self.decryptionEnable):\n body_json = self._decrypt(body)\n else:\n body_json = body\n for listener in self.listeners:\n listener.callback(ch, method, properties, body_json)\n \n self.channel.basic_consume(queue=queue_name, on_message_callback=callback, \n auto_ack=False)\n \n print(' [*] Waiting for messages. 
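The password generator above uses the random module, which is fine for an exercise but not for real credentials. The same 8-to-15 character idea with the standard-library secrets module, which is designed for security-sensitive randomness:

import secrets
import string

alphabet = string.ascii_lowercase + string.digits
length = secrets.choice(range(8, 16))        # 8..15, matching randint(8, 15)
sifre = ''.join(secrets.choice(alphabet) for _ in range(length))
print(sifre)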
To exit press CTRL+C')\n self.channel.start_consuming()\n\n","repo_name":"applica-of-things/drax-sdk-py","sub_path":"src/draxsdk/consumer/receiverService.py","file_name":"receiverService.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"26295923757","text":"#!/usr/bin/env python\n\nimport logging\nimport numpy as np\n\nfrom collections import defaultdict, OrderedDict\nfrom mido import MidiFile, MidiTrack, Message, MetaMessage\n\nimport partitura.score as score\nfrom partitura.utils import partition\n\n__all__ = [\"save_score_midi\", \"save_performance_midi\"]\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef get_partgroup(part):\n parent = part\n while parent.parent:\n parent = parent.parent\n return parent\n\n\ndef map_to_track_channel(note_keys, mode):\n ch_helper = {}\n tr_helper = {}\n track = {}\n channel = {}\n for (pg, p, v) in note_keys:\n if mode == 0:\n trk = tr_helper.setdefault(p, len(tr_helper))\n ch1 = ch_helper.setdefault(p, {})\n ch2 = ch1.setdefault(v, len(ch1) + 1)\n track[(pg, p, v)] = trk\n channel[(pg, p, v)] = ch2\n elif mode == 1:\n trk = tr_helper.setdefault(pg, len(tr_helper))\n ch1 = ch_helper.setdefault(pg, {})\n ch2 = ch1.setdefault(p, len(ch1) + 1)\n track[(pg, p, v)] = trk\n channel[(pg, p, v)] = ch2\n elif mode == 2:\n track[(pg, p, v)] = 0\n ch = ch_helper.setdefault(p, len(ch_helper) + 1)\n channel[(pg, p, v)] = ch\n elif mode == 3:\n trk = tr_helper.setdefault(p, len(tr_helper))\n track[(pg, p, v)] = trk\n channel[(pg, p, v)] = 1\n elif mode == 4:\n track[(pg, p, v)] = 0\n channel[(pg, p, v)] = 1\n elif mode == 5:\n trk = tr_helper.setdefault((p, v), len(tr_helper))\n track[(pg, p, v)] = trk\n channel[(pg, p, v)] = 1\n else:\n raise Exception(\"unsupported part/voice assign mode {}\".format(mode))\n\n result = dict((k, (track.get(k, 0), channel.get(k, 1))) for k in note_keys)\n # for (pg, p, voice), v in result.items():\n # pgn = pg.group_name if hasattr(pg, 'group_name') else pg.id\n # print(pgn, p.id, voice)\n # print(v)\n # print()\n return result\n\n\ndef get_ppq(parts):\n ppqs = np.concatenate(\n [part.quarter_durations()[:, 1] for part in score.iter_parts(parts)]\n )\n ppq = np.lcm.reduce(ppqs)\n return ppq\n\n\ndef save_performance_midi(\n performed_part, out, mpq=500000, ppq=480, default_velocity=64\n):\n \"\"\"Save a :class:`~partitura.performance.PerformedPart` instance as a\n MIDI file.\n\n Parameters\n ----------\n performed_part : :class:`~partitura.performance.PerformedPart`\n The performed part to save\n out : str or file-like object\n Either a filename or a file-like object to write the MIDI data\n to.\n mpq : int, optional\n Microseconds per quarter note. This is known in MIDI parlance\n as the \"tempo\" value. Defaults to 500000 (i.e. 120 BPM).\n ppq : int, optional\n Parts per quarter, also known as ticks per beat. Defaults to\n 480.\n default_velocity : int, optional\n A default velocity value (between 0 and 127) to be used for\n notes without a specified velocity. 
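get_ppq above folds every part's quarter durations into one tick resolution with np.lcm.reduce, so each part's times convert to integer ticks exactly. A quick check with made-up divisions:

import numpy as np

ppqs = np.array([480, 96, 384])
print(np.lcm.reduce(ppqs))   # 1920: the smallest PPQ exact for all three parts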
Defaults to 64.\n\n \"\"\"\n track_events = defaultdict(lambda: defaultdict(list))\n\n for c in performed_part.controls:\n track = c.get(\"track\", 0)\n ch = c.get(\"channel\", 1)\n t = int(np.round(10 ** 6 * ppq * c[\"time\"] / mpq))\n track_events[track][t].append(\n Message(\"control_change\", control=c[\"number\"], value=c[\"value\"], channel=ch)\n )\n\n for n in performed_part.notes:\n track = n.get(\"track\", 0)\n ch = n.get(\"channel\", 1)\n t_on = int(np.round(10 ** 6 * ppq * n[\"note_on\"] / mpq))\n t_off = int(np.round(10 ** 6 * ppq * n[\"note_off\"] / mpq))\n vel = n.get(\"velocity\", default_velocity)\n track_events[track][t_on].append(\n Message(\"note_on\", note=n[\"midi_pitch\"], velocity=vel, channel=ch)\n )\n track_events[track][t_off].append(\n Message(\"note_off\", note=n[\"midi_pitch\"], velocity=0, channel=ch)\n )\n\n for p in performed_part.programs:\n track = p.get(\"track\", 0)\n ch = p.get(\"channel\", 1)\n t = int(np.round(10 ** 6 * ppq * p[\"time\"] / mpq))\n track_events[track][t].append(\n Message(\"program_change\", program=int(p[\"program\"]), channel=ch)\n )\n\n if len(performed_part.programs) == 0:\n # Add default program (to each track/channel)\n channels_and_tracks = np.array(\n list(\n set(\n [\n (c.get(\"channel\", 1), c.get(\"track\", 0))\n for c in performed_part.controls\n ]\n + [\n (n.get(\"channel\", 1), n.get(\"track\", 0))\n for n in performed_part.notes\n ]\n )\n ),\n dtype=int,\n )\n\n timepoints = []\n for tr in track_events.keys():\n timepoints += list(track_events[tr].keys())\n timepoints = list(set(timepoints))\n\n for tr in np.unique(channels_and_tracks[:, 1]):\n channel_idxs = np.where(channels_and_tracks[:, 1] == tr)[0]\n track_channels = np.unique(channels_and_tracks[channel_idxs, 0])\n for ch in track_channels:\n track_events[tr][min(timepoints)].append(\n Message(\"program_change\", program=0, channel=ch)\n )\n\n midi_type = 0 if len(track_events) == 1 else 1\n\n mf = MidiFile(type=midi_type, ticks_per_beat=ppq)\n\n for j, i in enumerate(sorted(track_events.keys())):\n track = MidiTrack()\n mf.tracks.append(track)\n if j == 0:\n track.append(MetaMessage(\"set_tempo\", tempo=mpq, time=0))\n t = 0\n for t_msg in sorted(track_events[i].keys()):\n t_delta = t_msg - t\n for msg in track_events[i][t_msg]:\n track.append(msg.copy(time=t_delta))\n t_delta = 0\n t = t_msg\n if out:\n if hasattr(out, \"write\"):\n mf.save(file=out)\n else:\n mf.save(out)\n\n\ndef save_score_midi(\n parts, out, part_voice_assign_mode=0, velocity=64, anacrusis_behavior=\"shift\"\n):\n \"\"\"Write data from Part objects to a MIDI file\n\n Parameters\n ----------\n parts : Part, PartGroup or list of these\n The musical score to be saved.\n out : str or file-like object\n Either a filename or a file-like object to write the MIDI data\n to.\n part_voice_assign_mode : {0, 1, 2, 3, 4, 5}, optional\n This keyword controls how part and voice information is\n associated to track and channel information in the MIDI file.\n The semantics of the modes is as follows:\n\n 0\n Write one track for each Part, with channels assigned by\n voices\n 1\n Write one track for each PartGroup, with channels assigned by\n Parts (voice info is lost) (There can be multiple levels of\n partgroups, I suggest using the highest level of\n partgroup/part) [note: this will e.g. 
lead to all strings into\n            the same track] Each part not in a PartGroup will be assigned\n            its own track\n        2\n            Write a single track with channels assigned by Part (voice\n            info is lost)\n        3\n            Write one track per Part, and a single channel for all voices\n            (voice info is lost)\n        4\n            Write a single track with a single channel (Part and voice\n            info is lost)\n        5\n            Return one track per (part, voice) combination, each track\n            having a single channel.\n\n        The default mode is 0.\n    velocity : int, optional\n        Default velocity for all MIDI notes. Defaults to 64.\n    anacrusis_behavior : {\"shift\", \"pad_bar\"}, optional\n        Strategy to deal with anacrusis. If \"shift\", all\n        time points are shifted by the anacrusis (i.e., the first\n        note starts at 0). If \"pad_bar\", the \"incomplete\" bar of\n        the anacrusis is padded with silence. Defaults to 'shift'.\n    \"\"\"\n\n    ppq = get_ppq(parts)\n\n    events = defaultdict(lambda: defaultdict(list))\n    meta_events = defaultdict(lambda: defaultdict(list))\n\n    event_keys = OrderedDict()\n    tempos = {}\n\n    quarter_maps = [part.quarter_map for part in score.iter_parts(parts)]\n\n    first_time_point = min(qm(0) for qm in quarter_maps)\n\n    ftp = 0\n    # Deal with anacrusis\n    if first_time_point < 0:\n        if anacrusis_behavior == \"shift\":\n            ftp = first_time_point\n        elif anacrusis_behavior == \"pad_bar\":\n            time_signatures = []\n            for qm, part in zip(quarter_maps, score.iter_parts(parts)):\n                ts_beats, ts_beat_type = part.time_signature_map(0)\n                time_signatures.append((ts_beats, ts_beat_type, qm(0)))\n            # sort ts according to time\n            time_signatures.sort(key=lambda x: x[2])\n            ftp = -time_signatures[0][0] / (time_signatures[0][1] / 4)\n        else:\n            raise Exception(\n                'Invalid anacrusis_behavior value, must be one of (\"shift\", \"pad_bar\")'\n            )\n\n    for qm, part in zip(quarter_maps, score.iter_parts(parts)):\n\n        pg = get_partgroup(part)\n\n        notes = part.notes_tied\n\n        def to_ppq(t):\n            # convert div times to new ppq\n            return int(ppq * (qm(t) - ftp))\n\n        for tp in part.iter_all(score.Tempo):\n            tempos[to_ppq(tp.start.t)] = MetaMessage(\n                \"set_tempo\", tempo=tp.microseconds_per_quarter\n            )\n\n        for ts in part.iter_all(score.TimeSignature):\n            meta_events[part][to_ppq(ts.start.t)].append(\n                MetaMessage(\n                    \"time_signature\", numerator=ts.beats, denominator=ts.beat_type\n                )\n            )\n\n        for ks in part.iter_all(score.KeySignature):\n            meta_events[part][to_ppq(ks.start.t)].append(\n                MetaMessage(\"key_signature\", key=ks.name)\n            )\n\n        for note in notes:\n\n            # key is a tuple (part_group, part, voice) that will be\n            # converted into a (track, channel) pair.\n            key = (pg, part, note.voice)\n            events[key][to_ppq(note.start.t)].append(\n                Message(\"note_on\", note=note.midi_pitch)\n            )\n            events[key][to_ppq(note.start.t + note.duration_tied)].append(\n                Message(\"note_off\", note=note.midi_pitch)\n            )\n            event_keys[key] = True\n\n    tr_ch_map = map_to_track_channel(list(event_keys.keys()), part_voice_assign_mode)\n\n    # replace original event keys (partgroup, part, voice) by (track, ch) keys:\n    for key in list(events.keys()):\n        evs_by_time = events[key]\n        del events[key]\n        tr, ch = tr_ch_map[key]\n        for t, evs in evs_by_time.items():\n            events[tr][t].extend((ev.copy(channel=ch) for ev in evs))\n\n    # figure out in which tracks to replicate the time/key signatures of each part\n    part_track_map = partition(lambda x: x[0][1], tr_ch_map.items())\n    for part, rest in part_track_map.items():\n        part_track_map[part] = set(x[1][0] for x in rest)\n\n    # add the time/key sigs to their corresponding tracks\n    for part, m_events in 
meta_events.items():\n        tracks = part_track_map[part]\n        for tr in tracks:\n            for t, me in m_events.items():\n                events[tr][t] = me + events[tr][t]\n\n    n_tracks = max(tr for tr, _ in tr_ch_map.values()) + 1\n    tracks = [MidiTrack() for _ in range(n_tracks)]\n\n    # tempo events are handled differently from key/time sigs because they have a\n    # global effect. Instead of adding to each relevant track, like the key/time\n    # sig events, we add them only to the first track\n    for t, tp in tempos.items():\n        events[0][t].insert(0, tp)\n\n    for tr, events_by_time in events.items():\n        t_prev = 0\n        for t in sorted(events_by_time.keys()):\n            evs = events_by_time[t]\n            delta = t - t_prev\n            for ev in evs:\n                tracks[tr].append(ev.copy(time=delta))\n                delta = 0\n            t_prev = t\n\n    midi_type = 0 if n_tracks == 1 else 1\n\n    mf = MidiFile(type=midi_type, ticks_per_beat=ppq)\n\n    for track in tracks:\n        mf.tracks.append(track)\n\n    if out:\n        if hasattr(out, \"write\"):\n            mf.save(file=out)\n        else:\n            mf.save(out)\n","repo_name":"Housemountain/partitura","sub_path":"partitura/io/exportmidi.py","file_name":"exportmidi.py","file_ext":"py","file_size_in_byte":12449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"4"} +{"seq_id":"73966800436","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('rule', '0005_auto_20141002_0239'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='water',\n            name='coast_of',\n            field=models.ForeignKey(blank=True, to='rule.Province', null=True),\n        ),\n        migrations.AlterField(\n            model_name='water',\n            name='connected',\n            field=models.ManyToManyField(related_name='connected_rel_+', null=True, to=b'rule.Water', blank=True),\n        ),\n    ]\n","repo_name":"iizs/aor","sub_path":"rule/migrations/0006_auto_20141002_0241.py","file_name":"0006_auto_20141002_0241.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"43360942122","text":"## Import Modules\n\nimport os\n\nimport csv\n\n\n\n## Use the os module to read file path\n\ncsv_Path = os.path.join(\"Resources\", \"budget_data.csv\")\n\n#print(csv_Path)\n\n\n\n## Open the CSV\n\nwith open(csv_Path, newline = \"\") as csv_File:\n\n\n\n    # Initiate csv reader\n\n    csv_Reader = csv.reader(csv_File, delimiter = \",\")\n\n    #print(csv_Reader)\n\n\n\n    # Read the Header row \n\n    csv_Header = next(csv_File)\n\n    print(f'\\nHeader: {csv_Header}')\n\n\n\n    # Create lists to store Date and Revenue columns\n\n    DateCol = []\n\n    RevenueCol = []\n\n    \n\n\n\n    # Read each row x in data\n\n    for x in csv_Reader:\n\n        \n\n        # Store all Date records in a list\n\n        DateCol.append(x[0])\n\n\n\n        # Store all Revenue records in a list\n\n        RevenueCol.append(int(x[1]))\n\n\n\n\n\n    # Print the count of Months \n\n    TotalMonths = len(DateCol)\n\n    print(f'Total months in dataset: {TotalMonths}')\n\n\n\n    # Total net amount of \"Profit/Losses\" over the entire period\n\n    NetTotal = sum(RevenueCol)\n\n    print(f'Total: ${NetTotal}')\n\n\n\n    # The average change in \"Profit/Losses\" between months over the entire period\n\n    AverageChange = round(float(NetTotal/TotalMonths) , 2)\n\n    print(f'Average Change: $ {AverageChange}')\n\n\n\n    # Greatest Increase in Profits\n\n    GreatestIncrease = max(RevenueCol)\n\n    print(f'Greatest INCREASE in revenue: ${GreatestIncrease}') #how do i pull in date\n\n\n\n    # Greatest Decrease in Profits\n\n    
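# A sketch for the \"how do i pull in date\" notes in this script: DateCol and\n\n    # RevenueCol are built in step above, so list.index can recover the matching\n\n    # month (the two names below are illustrative additions, not original code):\n\n    # increase_date = DateCol[RevenueCol.index(max(RevenueCol))]\n\n    # decrease_date = DateCol[RevenueCol.index(min(RevenueCol))]\n\n    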
GreatestDecrease = min(RevenueCol)\n\n    print(f'Greatest DECREASE in revenue: ${GreatestDecrease}') #how do i pull in date\n\n\n\n# Use os module to specify output file to WRITE to\n\ncsv_Output_Path = os.path.join(\"budgetdata.txt\")\n\n\n\n# Open the output file using WRITE mode\n\nwith open(csv_Output_Path, \"w\", newline = \"\") as csv_File_Out:\n\n\n\n    # Initialize csv.writer\n\n    csv_Writer = csv.writer(csv_File_Out)\n\n\n\n    # Write results to text file\n\n    csv_Writer.writerow([\"Results\"])\n\n    csv_Writer.writerow([\"----------\"])\n\n    csv_Writer.writerow([\"Total months in dataset: \" + str(TotalMonths)])\n\n    csv_Writer.writerow([\"Total: $\" + str(NetTotal)])\n\n    csv_Writer.writerow([\"Average Change: $\" + str(AverageChange)])\n\n    csv_Writer.writerow([\"Greatest INCREASE in revenue: $\" + str(GreatestIncrease)]) #how do i pull in date\n\n    csv_Writer.writerow([\"Greatest DECREASE in revenue: $\" + str(GreatestDecrease)]) #how do i pull in date\n\n    \n\n","repo_name":"diamondamil/python-challenge","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"33163550330","text":"import pandas as pd\nfrom androguard.core.bytecodes.apk import APK\nfrom androguard.core.bytecodes.dvm import DalvikVMFormat\nfrom androguard.core.analysis.analysis import Analysis\nfrom androguard.decompiler.decompiler import DecompilerJADX\nfrom androguard.core.androconf import show_logging\nimport pickle\nimport os\nimport re\nimport operator\n\npermissions_dict = {}\napis_dict = {}\nglobal_permissions = []\nglobal_apis = []\n\ndef get_permissions(path):\n    application = APK(path)\n    permissions = application.get_permissions()\n    permissions = list(set(permissions))\n    return permissions\n\ndef get_apis(path):\n    \n    application = APK(path)\n    application_dex = DalvikVMFormat(application.get_dex())\n    application_x = Analysis(application_dex)\n\n    methods = set()\n    cs = [cc.get_name() for cc in application_dex.get_classes()]\n\n    for method in application_dex.get_methods():\n        g = application_x.get_method(method)\n\n        if method.get_code() == None:\n            continue\n\n        for i in g.get_basic_blocks().get():\n            for ins in i.get_instructions():\n                \n                output = ins.get_output()\n                match = re.search(r'(L[^;]*;)->[^\\(]*\\([^\\)]*\\).*', output)\n                if match and match.group(1) not in cs:\n                    methods.add(match.group())\n\n    methods = list(methods)\n    return methods\n\n\ndef collect_perms(perms):\n    global_permissions.append(perms)\n    for p in perms:\n        if p in permissions_dict:\n            permissions_dict[str(p)] += 1\n        else:\n            permissions_dict[str(p)] = 1\n    \n    \ndef refine_apis(apis):\n    api_list = []\n    for api in apis:\n        start_index = api.find('>')\n        end_index = api.find('(')\n        start_index+=1\n        api = api[start_index:end_index] + '()' \n        api_list.append(api)\n    \n    return api_list\n\ndef collect_apis(apis):\n    ref_apis = refine_apis(apis)\n    global_apis.append(ref_apis)\n    for api in ref_apis:\n        if api in apis_dict:\n            apis_dict[str(api)] += 1\n        else:\n            apis_dict[str(api)] = 1\n\n##path = r\"C:\\Users\\Anurag\\Documents\\GitHub\\mobile_malware_detection\\benign_apks\\\\Panchatantra Stories Book - 1.2 - APKTurbo.com.apk\"\n##perms = get_permissions(path)\n##collect_perms(perms)\n##apis = get_apis(path)\n##collect_apis(apis)\n##\n##print(len(global_apis))\n##print((global_permissions))\n\nfile_names = []\nfile_paths = []\n\nfor root,dirs,files in os.walk(r'C:\\Users\\Anurag\\Documents\\GitHub\\mobile_malware_detection\\benign_apks'):\n    
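# os.walk yields one (dirpath, dirnames, filenames) triple per directory,\n    # so file_names[0] ends up holding the file list of the top-level folder\n    # (that first entry is the only one the later loop consumes)\n    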
file_names.append((files))\n \nfor f in file_names[0]:\n path = r'C:\\Users\\Anurag\\Documents\\GitHub\\mobile_malware_detection\\benign_apks\\\\' + '\\\\' + str(f)\n file_paths.append(path)\n\ncount = 0\n\nfor path in file_paths:\n print('reading ' + str(count+1) + ' file' , path)\n perms = get_permissions(path)\n row = collect_perms(perms)\n apis = get_apis(path)\n collect_apis(apis)\n count+=1\n\nsorted_permissions_dict = sorted(permissions_dict.items(), key = operator.itemgetter(1) , reverse = True)\n##for key , value in sorted_permissions_dict:\n## print(key , value)\n\nsorted_apis_dict = sorted(apis_dict.items() , key = operator.itemgetter(1) , reverse = True)\n##for key , value in sorted_apis_dict:\n## print(key , value)\n\n\nwith open('saved_items/benign_permissions.pickle' , 'wb') as handle:\n pickle.dump(sorted_permissions_dict , handle)\n\nwith open('saved_items/benign_api_calls.pickle' , 'wb') as handle:\n pickle.dump(sorted_apis_dict , handle)\n\nwith open('saved_items/benign_apis_list.pickle' , 'wb') as handle:\n pickle.dump(global_apis , handle)\n \nwith open('saved_items/benign_permissions_list.pickle' , 'wb') as handle:\n pickle.dump(global_permissions , handle)\n\n\n","repo_name":"anuragdhasmana1995/mobile_malware_detection","sub_path":"program/benign_apis_permissions.py","file_name":"benign_apis_permissions.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"1039790910","text":"import numpy as np\nfrom scipy import stats\n\n# if you want to use your own Decision Tree implementation for Random Forest\nfrom decision_tree import DTClassifier\n\n# something useful for tracking algorithm's iterations\nimport progressbar\n\nwidgets = ['Model Training: ', progressbar.Percentage(), ' ',\n progressbar.Bar(marker=\"-\", left=\"[\", right=\"]\"),\n ' ', progressbar.ETA()]\n\ndef get_bootstrap_samples(X, y, nr_bootstraps, nr_samples=None):\n # this function is for getting bootstrap samples with replacement\n # from the initial dataset (X, y)\n # nr_bootstraps is the number of bootstraps needed\n # nr_samples is the number of data points to sample each time\n # it should be the size of X, if nr_samples is not provided\n # Hint: you may need np.random.choice function somewhere in this function\n if nr_samples is None:\n nr_samples = X.shape[0]\n bootstrap_samples = []\n for i in range(nr_bootstraps):\n indexes = np.arange(X.shape[0])\n random_indexes = np.random.choice(indexes, size=nr_samples, replace=True)\n bootstrap_samples.append((X[random_indexes].copy(), y[random_indexes].copy()))\n return bootstrap_samples\n\nclass Bagging:\n def __init__(self, base_estimator, nr_estimators=10):\n # number of models in the ensemble\n self.nr_estimators = nr_estimators\n self.progressbar = progressbar.ProgressBar(widgets=widgets)\n # this can be any object that has 'fit', 'predict' methods\n self.base_estimator = base_estimator\n\n def fit(self, X, y):\n # this method will fit a separate model (self.base_estimator)\n # on each bootstrap sample and each model should be stored\n # in order to use it in 'predict' method\n X = np.array(X)\n y = np.array(y)\n bootstrap_samples = get_bootstrap_samples(X, y,\n nr_bootstraps=self.nr_estimators)\n self.models = []\n for i in self.progressbar(range(self.nr_estimators)):\n X_test, y_test = bootstrap_samples[i]\n model = self.base_estimator()\n model.fit(X_test, y_test)\n self.models.append(model)\n\n def predict(self, X):\n # this method will predict the 
labels for a given test dataset\n # get the majority 'vote' for each test instance from the ensemble\n # Hint: you may want to use 'mode' method from scipy.stats\n y_predictions = np.array([model.predict(X) for model in self.models])\n y_preds = stats.mode(y_predictions)[0][0]\n return y_preds\n\nclass RandomForest:\n def __init__(self, nr_estimators=10, max_features=None, min_samples_split=2,\n min_gain=0, max_depth=float(\"inf\")):\n # number of trees in the forest\n self.nr_estimators = nr_estimators\n\n # this is the number of features to use for each tree\n # if not specified this should be set to sqrt(initial number of features)\n self.max_features = max_features\n\n # the rest is for decision tree\n self.min_samples_split = min_samples_split\n self.min_gain = min_gain\n self.max_depth = max_depth\n self.progressbar = progressbar.ProgressBar(widgets=widgets)\n\n def fit(self, X, y):\n # this method will fit a separate tree\n # on each bootstrap sample and subset of features\n # each tree should be stored\n # in order to use it in 'predict' method\n\n X = np.array(X)\n y = np.array(y)\n bootstrap_samples = get_bootstrap_samples(X, y,\n self.nr_estimators)\n\n self.trees = []\n for i in self.progressbar(range(self.nr_estimators)):\n # you can modify the code to use sklearn's decision tree\n # if you don't want to use your implementation\n tree = DTClassifier(\n min_samples_split=self.min_samples_split,\n min_impurity=self.min_gain,\n max_depth=self.max_depth)\n X_boot, y_boot = bootstrap_samples[i]\n\n count = self.max_features if self.max_features else int(np.sqrt(X.shape[1]))\n idx = np.random.choice(X.shape[1], size=count, replace=False)\n\n # we need to keep the indices of the features used for this tree\n tree.feature_indices = idx\n tree.fit(X_boot[:, idx], y_boot)\n self.trees.append(tree)\n\n def predict(self, X):\n # this method will predict the labels for a given test dataset\n # get the majority 'vote' for each test instance from the forest\n # Hint: you may want to use 'mode' method from scipy.stats\n # besides the individual trees, you will also need the feature indices\n # it was trained on\n y_predictions = np.array([tree.predict(X[:, tree.feature_indices]) for tree in self.trees])\n y_preds = stats.mode(y_predictions)[0][0]\n return y_preds\n\nclass WeightedVoting:\n def __init__(self, estimators, num_folds=3):\n # list of classifier objects\n self.estimators = estimators\n self.nr_estimators = len(estimators)\n self.weights = None\n self.num_folds = num_folds\n\n def get_weights(self, X, y):\n # This method is for deriving the weights of each individual classifier\n # using cross-validation as described in the lecture slides\n # the output should be an array of weights\n weights = np.empty(self.nr_estimators)\n X_folds = np.array_split(X, self.num_folds)\n y_folds = np.array_split(y, self.num_folds)\n X_folds= [el for el in X_folds if el.size > 0]\n y_folds= [el for el in y_folds if el.size > 0]\n for i_estimator, estimator in enumerate(self.estimators):\n avg_acc = 0\n for i in range(self.num_folds):\n X_train, X_val = np.concatenate(X_folds[:i] + X_folds[i+1:]), X_folds[i]\n y_train, y_val = np.concatenate(y_folds[:i] + y_folds[i+1:]), y_folds[i]\n estimator.fit(X_train, y_train)\n prediction = estimator.predict(X_val)\n acc = (prediction == y_val).sum() / len(y_val)\n avg_acc += acc\n avg_acc /= self.num_folds\n weights[i_estimator] = avg_acc\n weights /= weights.sum()\n return weights\n\n def fit(self, X, y):\n # Train the individual models on the whole training 
dataset\n        # and update self.estimators accordingly in order to use them for prediction\n        self.weights = self.get_weights(X, y)\n        for estimator in self.estimators:\n            estimator.fit(X, y)\n\n    def predict(self, X):\n        # Use the fitted individual models and their weights to perform prediction\n        # This link may be useful\n        # https://scikit-learn.org/stable/modules/ensemble.html#weighted-average-probabilities-soft-voting\n        predictions_proba = self.estimators[0].predict_proba(X) * self.weights[0]\n        for estimator, weight in zip(self.estimators[1:], self.weights[1:]):\n            predictions_proba += estimator.predict_proba(X) * weight\n        predictions = predictions_proba.argmax(axis=1)\n        return predictions\n\n\nclass Stacking:\n    def __init__(self, estimators, final_estimator, meta_features='class', cv=False, k=None):\n        # list of classifier objects\n        self.estimators = estimators\n        # classifier for the meta-model\n        self.final_estimator = final_estimator\n        self.nr_estimators = len(estimators)\n        # meta-features (input) of the meta-model, this should take two values, either\n        # 'class' if we take the predicted labels or\n        # 'prob' if we take the class probabilities from the individual models.\n\n        # In case of 'prob' you need to use 'predict_proba' method on sklearn classifiers\n        # and need to discard one of the probability values. For example, if the task is a 2 class classification problem,\n        # then each individual model's predict_proba method will return a vector of 2 values for each class's probability\n        # and we can discard one of those values because it is the complement of the other class. ([p, q], where q = 1-p)\n        # In case we have an m-class classification problem and T individual models, then the input for the meta-model will be\n        # a T * (m-1) dimensional vector, since each model will give m-1 probability values.\n\n        # In case of 'class', the input for the meta-model will be a T dimensional vector.\n        self.meta_features = meta_features\n        # boolean specifying whether to use cross validation for deriving the meta-features or not, as described in the lecture slides\n        self.cv = cv\n        # number of folds of cross validation\n        self.k = k\n\n    def _predict(self, estimator, X):\n        if self.meta_features == 'class':\n            prediction = estimator.predict(X)\n        elif self.meta_features == 'prob':\n            prediction = estimator.predict_proba(X)\n            prediction = prediction[:, 1:]\n            if prediction.shape[1] == 1:\n                prediction = prediction.ravel()\n        else:\n            raise ValueError('meta_features is invalid')\n        return prediction\n\n    def get_predictions(self, X, y=None, fit=False):\n        predictions = []\n        if not self.cv or not fit:\n            for estimator in self.estimators:\n                if fit:\n                    estimator.fit(X, y)\n                prediction = self._predict(estimator, X)\n                predictions.append(prediction)\n        else:\n            X_folds = np.array_split(X, self.k)\n            y_folds = np.array_split(y, self.k)\n            X_folds = [el for el in X_folds if el.size > 0]\n            y_folds = [el for el in y_folds if el.size > 0]\n            for estimator in self.estimators:\n                predictions_estimator = []\n                for i in range(self.k):\n                    X_train, X_val = np.concatenate(X_folds[:i] + X_folds[i+1:]), X_folds[i]\n                    y_train, y_val = np.concatenate(y_folds[:i] + y_folds[i+1:]), y_folds[i]\n                    estimator.fit(X_train, y_train)\n                    prediction = self._predict(estimator, X_val)\n                    predictions_estimator.append(prediction)\n                predictions.append(np.concatenate(np.array(predictions_estimator)))\n        return np.array(predictions).T\n\n    def fit(self, X, y):\n        # Derive the meta-features and train the meta-model on it\n        predictions = self.get_predictions(X, y, fit=True)\n        # 
print(predictions)\n self.final_estimator.fit(predictions, y)\n\n def predict(self, X):\n # Get the predictions of the individual models and provide them as inputs to the meta-model\n predictions = self.get_predictions(X)\n result = self.final_estimator.predict(predictions)\n return result\n","repo_name":"TigranGit/MachineLearningAlgorithms","sub_path":"HW5/ensemble_methods.py","file_name":"ensemble_methods.py","file_ext":"py","file_size_in_byte":10014,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"823213539","text":"import numpy as np\nimport math\nimport networkx\nimport scipy.signal\nimport torch\nimport torch.nn as nn\nimport RSA_methods as RM\nimport random\nfrom util import data_reduction\nimport Rearrangement as Rea\nfrom tqdm import tqdm\nimport datetime\n\n\nrandom.seed(0)\nmethod = [RM.SPT, RM.MST, RM.SFMOR]\n\nclass GNN_agent():\n def __init__(self, net_graph, criterion, model, device, optimizer, model_path = None, summary_writer = None):\n # self.name = 'agent_' + str(id)\n # self.name = name\n # self.opt_a = opt_a\n # self.opt_c = opt_c\n # self.globalAC = globalAC\n # self.coord = coord\n # self.globalTrainStep = globalTrainStep\n # self.maxTrainStep = maxTrainStep\n # self.net_graph = net_graph.copy()\n # self.model_path = model_path\n # self.summary_writer = summary_writer\n # self.gamma = 0.95\n #self.name = 'agent_' + str(id)\n #self.name = name\n self.G = net_graph.copy()\n self.method = RM.SFMOR\n self.service_list = []\n self.criterion = criterion\n self.model = model\n self.device = device\n self.optimizer = optimizer\n self.episodes = 1000\n self.batch_size = 32\n self.path_map = np.load('path_map.npy', allow_pickle=True)\n\n def random_request(self):\n source = random.randint(1, 14)\n num_destination = random.randint(2, 5)\n destination = []\n for i in range(num_destination):\n d = random.randint(1, 14)\n while d == source or d in destination:\n d = random.randint(1, 14)\n destination.append(d)\n # len_fs = random.randint(1, 20)\n bandwidth = random.randint(1, 500)\n time = random.randint(1, 100)\n\n # return source, destination, len_fs, time\n return source, destination, bandwidth, time\n\n def join(self):\n if len(self.service_list) == 0:\n return -1, -1\n\n i = random.randint(0, len(self.service_list) - 1)\n source = self.service_list[i][1]\n destination = self.service_list[i][2]\n if len(destination) > 10:\n return -1, -1\n d = random.randint(1, 14)\n while d == source or d in destination:\n d = random.randint(1, 14)\n return i, d\n\n def leave(self):\n if len(self.service_list) == 0:\n return -1, -1\n\n i = random.randint(0, len(self.service_list) - 1)\n # while len(self.service_list[i][2]) <= 1:\n # i = random.randint(0, len(self.service_list) - 1)\n source = self.service_list[i][1]\n destination = self.service_list[i][2]\n if len(destination) <= 1:\n return -1, -1\n d = random.choice(destination)\n\n return i, d\n\n def update_fs(self, path, len_fs: int, start_f: int):\n if len(path) <= 1:\n return\n for i in range(len_fs):\n for j in range(len(path) - 1):\n self.G[path[j]][path[j + 1]]['fs'][start_f + i] = 0\n\n def release_fs(self, path, len_fs: int, start_f: int):\n if len(path) <= 1:\n return\n for i in range(len_fs):\n for j in range(len(path) - 1):\n self.G[path[j]][path[j + 1]]['fs'][start_f + i] = 1\n\n def update_request(self, path_tree):\n for path, len_fs, start_f in path_tree:\n self.update_fs(path, len_fs, start_f)\n\n def release_request(self, time_to):\n remove_list = []\n flag = 
0\n for r in self.service_list:\n r[-1] = r[-1] - time_to\n if r[-1] <= 0:\n flag = 1\n for path, len_fs, start_f in r[0]:\n self.release_fs(path, len_fs, start_f)\n remove_list.append(r)\n for i in remove_list:\n self.service_list.remove(i)\n return flag\n\n def get_down(self, destination, child, i):\n down = []\n for c in child[i]:\n down.append(c)\n down.extend(self.get_down(destination, child, destination.index(c)))\n return down\n\n def Full_rearrangement(self, r):\n\n path_tree, source, destination, bandwidth, time = self.service_list[r]\n for path, len_fs, start_f in path_tree:\n self.release_fs(path, len_fs, start_f)\n path_tree_new = self.method(self.G, source, destination, bandwidth)\n if len(path_tree_new) != 0:\n self.update_request(path_tree_new)\n self.service_list[r][0] = path_tree_new\n return 0\n # for path, len_fs, start_f in path_tree:\n # self.release_fs(path, len_fs, start_f)\n else:\n for path, len_fs, start_f in path_tree:\n self.update_fs(path, len_fs, start_f)\n return 1\n\n def Partial_rearrangement(self, r):\n\n path_tree, source, destination, bandwidth, time = self.service_list[r]\n up_member = [source]\n down_member = []\n remove_list = []\n append_list = []\n block = 0\n for path, len_fs, start_f in path_tree:\n if (path[0] not in [source] + destination) and (path[-1] not in [source] + destination):\n remove_list.append([path, len_fs, start_f])\n elif (path[0] not in [source] + destination):\n remove_list.append([path, len_fs, start_f])\n down_member.append(path[-1])\n elif (path[-1] not in [source] + destination):\n # self.service_list[r][0].remove([path, len_fs, start_f])\n remove_list.append([path, len_fs, start_f])\n flag = 0\n for p in path_tree:\n if path[-1] == p[0] and p[-1] in destination:\n flag = 1\n if flag == 1:\n up_member.append(path[0])\n\n while len(down_member) != 0:\n p = []\n for i in up_member:\n for j in down_member:\n p.append(self.path_map[i, j])\n p = sorted(p, key=lambda x: x[1])\n flag = 1\n for path_1, len_path_n in p:\n # path = p[0][0]\n # len_path = p[0][1]\n\n len_fs_1 = RM.modulation_level(bandwidth, len_path_n)\n start_f_1 = RM.SP_FF(self.G, path_1, len_fs_1)\n if start_f_1 != -1:\n append_list.append([path_1, len_fs_1, start_f_1])\n down_member.remove(path_1[-1])\n flag = 0\n break\n if flag == 1:\n block = 1\n return 1\n\n # Vin.append(path[-1])\n\n if block == 0:\n for path, len_fs, start_f in append_list:\n self.update_fs(path, len_fs, start_f)\n self.service_list[r][0].append([path, len_fs, start_f])\n for path, len_fs, start_f in remove_list:\n self.release_fs(path, len_fs, start_f)\n self.service_list[r][0].remove([path, len_fs, start_f])\n\n # reroute\n path_tree, source, destination, bandwidth, time = self.service_list[r]\n hid_list = [0 for i in range(len(destination))]\n hop_list = [0 for i in range(len(destination))]\n # parent = [[] for i in range(len(destination))]\n child = [[] for i in range(len(destination))]\n down = [[] for i in range(len(destination))]\n for path, len_fs, start_f in path_tree:\n hid_list[destination.index(path[-1])] = start_f + len_fs - 1\n hop_list[destination.index(path[-1])] = Rea.get_hops(path_tree, source, path[-1])\n\n if path[0] != source:\n # parent[destination.index(path[-1])].append(path[0])\n child[destination.index(path[0])].append(path[-1])\n for i in range(len(destination)):\n down[i] = self.get_down(destination, child, i)\n Cth = max(hid_list) * sum(hop_list) / len(destination)\n for i in range(len(destination)):\n cost = hid_list[i] * hop_list[i]\n if cost > Cth:\n p = []\n 
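# candidate new parents for destination[i]: any node already in the tree\n                # except destination[i] itself and its own downstream set down[i],\n                # presumably to keep the rerouted tree free of cycles\n                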
for j in [source] + destination:\n if j != destination[i] and j not in down[i]:\n #p.append(self.path_map[j, destination[i]])\n path_n, len_path_n = self.path_map[j, destination[i]]\n len_fs_n = RM.modulation_level(bandwidth, len_path_n)\n _, cut_n, start_f = RM.cal_min_cut_num(self.G, path_n, len_fs_n)\n p.append([path_n, len_path_n, len_fs_n, cut_n, start_f])\n p = sorted(p, key=lambda x: x[3])\n for path_n, len_path_n, len_fs_n, cut_n, start_f_n in p:\n block = 1\n #len_fs_1 = RM.modulation_level(bandwidth, len_path_n)\n #start_f_1 = RM.SP_FF(self.G, path_1, len_fs_1)\n if start_f_n != -1:\n block = 0\n for path, len_fs, start_f in path_tree:\n if path[-1] == destination[i]:\n if path != path_n:\n self.service_list[r][0].remove([path, len_fs, start_f])\n self.release_fs(path, len_fs, start_f)\n self.service_list[r][0].append([path_n, len_fs_n, start_f_n])\n self.update_fs(path_n, len_fs_n, start_f_n)\n for j in range(len(destination)):\n down[j].append(destination[i])\n down[j].extend(down[i])\n break\n break\n if block == 1:\n return 1\n return 0\n\n\n\n def train(self, model, device, buffer_g, buffer_a, buffer_b, optimizer):\n model.train()\n\n batch_graph = buffer_g\n output = buffer_a\n\n #labels = torch.LongTensor([graph.label for graph in batch_graph]).to(device)\n\n # compute loss\n loss = self.criterion(buffer_b, 0)\n\n # backprop\n if optimizer is not None:\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n loss = loss.detach().cpu().numpy()\n\n print(\"loss training: %f\" % (loss))\n\n return loss\n\n def rmsa(self):\n episode_size = 1000\n num_episode = 0\n num_block = 0\n num_request = 0\n ep_block = 0\n blocking_rate_list = []\n\n buffer_g = []\n buffer_a = []\n buffer_b = []\n buffer_n = []\n\n while num_episode < self.episodes:\n time_to = 1\n flag = self.release_request(time_to)\n if flag == 1: # rearrangement\n for i in range(len(self.service_list)):\n g = data_reduction(self.service_list[i], self.G)\n buffer_g.append(g)\n action = self.model([g])\n buffer_a.append(action)\n if action == 0:\n buffer_b.append(0)\n buffer_n.append(0)\n elif action == 1:\n buffer_b.append(self.Full_rearrangement(i))\n buffer_n.append(1)\n elif action == 2:\n buffer_b.append(self.Partial_rearrangement(i))\n buffer_n.append(1)\n\n num_request += 1\n for i in range(len(buffer_n)):\n buffer_n[i] += 1\n # print(num_request)\n mode = random.randint(0, 2)\n\n if mode == 0: # a multicast session first appears\n source, destination, bandwidth, time = self.random_request()\n path_tree = self.method(self.G, source, destination, bandwidth)\n if len(path_tree) == 0:\n num_block += 1\n for i in range(len(buffer_b)):\n buffer_b[i] += 1\n ep_block += 1\n else:\n self.service_list.append([path_tree, source, destination, bandwidth, time])\n self.update_request(path_tree)\n elif mode == 1: # a new member d to join\n i, d = self.join()\n if i != -1:\n tree, source, destination, bandwidth, _ = self.service_list[i]\n flag = 0\n for path, _, _ in tree:\n if d == path[0] or d == path[-1]:\n flag = 1\n if flag == 1:\n self.service_list[i][2].append(d)\n else:\n p = []\n for u in ([source] + destination):\n p.append(self.path_map[u, d])\n p = sorted(p, key=lambda x: x[1])\n path = p[0][0]\n len_path = p[0][1]\n len_fs = RM.modulation_level(bandwidth, len_path)\n start_f = RM.SP_FF(self.G, path, len_fs)\n if start_f == -1:\n num_block += 1\n for i in range(len(buffer_b)):\n buffer_b[i] += 1\n ep_block += 1\n else:\n self.update_fs(path, len_fs, start_f)\n self.service_list[i][0].append([path, len_fs, 
start_f])\n                        self.service_list[i][2].append(d)\n            else: # a member d to leave\n                i, d = self.leave()\n                if i != -1:\n                    tree, source, destination, bandwidth, _ = self.service_list[i]\n                    flag = 0\n                    for path, _, _ in tree:\n                        if path[0] == d:\n                            flag = 1\n\n                    if flag == 1: # d has downstream members\n                        self.service_list[i][2].remove(d)\n                    else:\n                        self.service_list[i][2].remove(d)\n                        for path, len_fs, start_f in tree:\n                            if path[-1] == d:\n                                self.release_fs(path, len_fs, start_f)\n                                self.service_list[i][0].remove([path, len_fs, start_f])\n\n            if num_request % episode_size == 0:\n                num_episode += 1\n                print(\"Ep: {}, Blocking P: {}, Ep Bp: {}\".format(num_episode,\n                                                                 num_block / num_request,\n                                                                 ep_block / episode_size))\n                blocking_rate_list.append(num_block / num_request)\n                ep_block = 0\n\n            if len(buffer_g) == 2 * self.batch_size - 1:\n                # buffer_v_target = tl.rein.discount_episode_rewards(np.asarray(buffer_r[i]), self.gamma, mode=1)\n                # self.agents[i].update_global(np.vstack(buffer_s[i][:self.batch_size]),\n                #                              np.vstack(buffer_a[i][:self.batch_size]),\n                #                              np.vstack(buffer_v_target[:self.batch_size]),\n                #                              self.globalAC[i],\n                #                              num_episode)\n                for i in range(self.batch_size):\n                    buffer_b[i] = buffer_b[i]/buffer_n[i]\n                self.train(self.model, self.device, buffer_g[:self.batch_size], buffer_a[:self.batch_size], buffer_b[:self.batch_size], self.optimizer)\n                del buffer_g[:self.batch_size]\n                del buffer_a[:self.batch_size]\n                del buffer_b[:self.batch_size]\n                del buffer_n[:self.batch_size]\n\n        self.release_request(1000)\n        return num_block / num_request","repo_name":"1207473307/OFC2021_txj","sub_path":"Waste/GNN_agent.py","file_name":"GNN_agent.py","file_ext":"py","file_size_in_byte":16167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"40259606238","text":"import os\nimport csv\nimport tempfile\n\n\nclass CarBase:\n    def __init__(self, brand, photo_file_name, carrying):\n        self.car_type = None\n        self.brand = brand\n        self.photo_file_name = photo_file_name\n        self.carrying = carrying\n\n    def get_photo_file_ext(self):\n        return os.path.splitext(self.photo_file_name)[1]\n\n\nclass Car(CarBase):\n    def __init__(self, brand, photo_file_name, carrying, passenger_seats_count):\n        self.car_type = \"car\"\n        self.brand = brand\n        self.photo_file_name = photo_file_name\n        self.carrying = carrying\n        self.passenger_seats_count = passenger_seats_count\n\n\nclass Truck(CarBase):\n    def __init__(self, brand, photo_file_name, carrying, body_whl):\n        self.car_type = \"truck\"\n        self.brand = brand\n        self.photo_file_name = photo_file_name\n        self.carrying = carrying\n        if body_whl:\n            dimensions = body_whl.split(\"x\")\n            self.body_length = float(dimensions[0])\n            self.body_width = float(dimensions[1])\n            self.body_height = float(dimensions[2])\n        else:\n            self.body_length = self.body_height = self.body_width = None\n\n    def get_body_volume(self):\n        try:\n            return self.body_height * self.body_width * self.body_length\n        except TypeError:\n            print(\"Unknown body dimensions\")\n\n\nclass SpecMachine(CarBase):\n    def __init__(self, brand, photo_file_name, carrying, extra):\n        self.car_type = \"spec_machine\"\n        self.brand = brand\n        self.photo_file_name = photo_file_name\n        self.carrying = carrying\n        self.extra = extra\n\n\ndef get_car_list(csv_filename):\n    car_list = []\n    with open(csv_filename, 'r') as f:\n        reader = csv.reader(f)\n        next(reader, None)\n        for row in reader:\n            vehicle = ', '.join(row).split(\";\")\n            if vehicle[0] == \"\":\n                continue\n\n            if vehicle[0] == \"car\":\n                if vehicle[1] and vehicle[2] and 
vehicle[3] and vehicle[5]:\n car = Car(vehicle[1], vehicle[3], float(vehicle[5]), int(vehicle[2]))\n car_list.append(car)\n else:\n continue\n\n elif vehicle[0] == \"truck\":\n if vehicle[1] and vehicle[3] and vehicle[5]:\n truck = Truck(vehicle[1], vehicle[3], float(vehicle[5]), vehicle[4])\n car_list.append(truck)\n else:\n continue\n\n elif vehicle[0] == \"spec_machine\":\n if vehicle[1] and vehicle[3] and vehicle[5] and vehicle[6]:\n spec_machine = SpecMachine(vehicle[1], vehicle[3], float(vehicle[5]), vehicle[6])\n car_list.append(spec_machine)\n else:\n continue\n\n else:\n print(\"Incorrect vehicle type\")\n return car_list\n\n#scania = Truck(\"Scania\", \"photo.png\", 30, \"12x6x2.5\")\n#print(scania.get_body_volume())\n#print(get_car_list(\"cars.csv\")[2].get_body_volume())","repo_name":"Black0274/Diving-in-Python-Course","sub_path":"Week3/car_types.py","file_name":"car_types.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"27571021414","text":"import torch\nimport torch.nn as nn\n\n\nclass SumFusion(nn.Module):\n def __init__(self, input_dim=512, output_dim=100):\n super(SumFusion, self).__init__()\n self.fc_x = nn.Linear(input_dim, output_dim)\n self.fc_y = nn.Linear(input_dim, output_dim)\n\n def forward(self, x, y):\n output = self.fc_x(x) + self.fc_y(y)\n return x, y, output\n\n\nclass ConcatFusion(nn.Module):\n def __init__(self, input_dim=1024, output_dim=100):\n super(ConcatFusion, self).__init__()\n self.fc_out = nn.Linear(input_dim, output_dim)\n\n def forward(self, x, y):\n output = torch.cat((x, y), dim=1)\n output = self.fc_out(output)\n return x, y, output\n\n\nclass FiLM(nn.Module):\n \"\"\"\n FiLM: Visual Reasoning with a General Conditioning Layer,\n https://arxiv.org/pdf/1709.07871.pdf.\n \"\"\"\n\n def __init__(self, input_dim=512, dim=512, output_dim=100, x_film=True):\n super(FiLM, self).__init__()\n\n self.dim = input_dim\n self.fc = nn.Linear(input_dim, 2 * dim)\n self.fc_out = nn.Linear(dim, output_dim)\n\n self.x_film = x_film\n\n def forward(self, x, y):\n\n if self.x_film:\n film = x\n to_be_film = y\n else:\n film = y\n to_be_film = x\n\n gamma, beta = torch.split(self.fc(film), self.dim, 1)\n\n output = gamma * to_be_film + beta\n output = self.fc_out(output)\n\n return x, y, output\n\n\nclass GatedFusion(nn.Module):\n \"\"\"\n Efficient Large-Scale Multi-Modal Classification,\n https://arxiv.org/pdf/1802.02892.pdf.\n \"\"\"\n\n def __init__(self, input_dim=512, dim=512, output_dim=100, x_gate=True):\n super(GatedFusion, self).__init__()\n\n self.fc_x = nn.Linear(input_dim, dim)\n self.fc_y = nn.Linear(input_dim, dim)\n self.fc_out = nn.Linear(dim, output_dim)\n\n self.x_gate = x_gate # whether to choose the x to obtain the gate\n\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x, y):\n out_x = self.fc_x(x)\n out_y = self.fc_y(y)\n\n if self.x_gate:\n gate = self.sigmoid(out_x)\n output = self.fc_out(torch.mul(gate, out_y))\n else:\n gate = self.sigmoid(out_y)\n output = self.fc_out(torch.mul(out_x, gate))\n\n return out_x, out_y, output\n\n","repo_name":"GeWu-Lab/OGM-GE_CVPR2022","sub_path":"models/fusion_modules.py","file_name":"fusion_modules.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","stars":167,"dataset":"github-code","pt":"4"} +{"seq_id":"3481907256","text":"import sys\nfrom ROOT import TCanvas,TLegend,THStack,TLegend\nfrom plotting.RootFileHandler import RootFileHandler\nfrom 
plotting.PlotStyle import setupAxes,setPlotStyle, colorRwthDarkBlue,\\\n\tdrawLabelCmsPrivateSimulation, colorRwthLightBlue, colorRwthGruen,\\\n\tcolorRwthTuerkis, colorRwthRot, colorRwthMagenta, setupPalette\nfrom plotting.OutputModule import CommandLineHandler,CliColors\nfileHandler = RootFileHandler(sys.argv[1])\n\nsetPlotStyle()\ncli = CommandLineHandler('[QualityCodes] ')\n\nqualityCodeDict = {\n\t\t\t\t1:'Halo Muon',\n\t\t\t\t2:'Very low qual. Type 1',\n\t\t\t\t3:'Very low qual. Type 2',\n\t\t\t\t4:'Very low qual. Type 3',\n\t\t\t\t5:'unmatched RPC',\n\t\t\t\t6:'unmatched DT or CSC',\n\t\t\t\t7:'matched DT-RPC or CSC-RPC'\n\t\t\t\t}\n\ngridSizeDict = {\n\t\t\t0:'Central',\n\t\t\t1:'3x3',\n\t\t\t2:'5x5'}\n\ndef plotQualityCodes():\n\tc = TCanvas('cQualityCodes')\n\tc.SetLogy()\n\tqualityCodes = fileHandler.getHistogram('multiplicity/L1MuonQualityCodesCentral_Multiplicity')\n\tqualityCodesFail = fileHandler.getHistogram('multiplicity/L1MuonQualityCodesCentralFail_Multiplicity')\n\t\n\tqualityCodes3x3 = fileHandler.getHistogram('multiplicity/L1MuonQualityCodes3x3_Multiplicity')\n\tqualityCodes3x3Fail = fileHandler.getHistogram('multiplicity/L1MuonQualityCodes3x3Fail_Multiplicity')\n\t\n\tqualityCodes5x5 = fileHandler.getHistogram('multiplicity/L1MuonQualityCodes5x5_Multiplicity')\n\tqualityCodes5x5Fail = fileHandler.getHistogram('multiplicity/L1MuonQualityCodes5x5Fail_Multiplicity')\n\t\n\tsetupAxes(qualityCodes)\n\tqualityCodes.SetTitle('Quality codes for grid matching;;#')\n\tqualityCodes.SetLineColor(colorRwthDarkBlue)\n\tqualityCodes.GetXaxis().SetRangeUser(0,8)\n\tqualityCodes.SetStats(0)\n\tqualityCodes.SetLineWidth(3)\n\t\n\tc.cd().SetBottomMargin(0.15)\n\t#Label the bins with the meaning of the quality code\n\tfor i in range(1,8):\n\t\tqualityCodes.GetXaxis().SetBinLabel(qualityCodes.FindBin(i),qualityCodeDict.get(i))\n\t\n\tsetupAxes(qualityCodesFail)\n\tqualityCodesFail.SetLineWidth(3)\n\tqualityCodesFail.SetLineStyle(7)\n\tqualityCodesFail.SetLineColor(colorRwthLightBlue)\n\t\n\tsetupAxes(qualityCodes3x3)\n\tqualityCodes3x3.SetLineWidth(3)\n\tqualityCodes3x3.SetLineStyle(10)\n\tqualityCodes3x3.SetLineColor(colorRwthGruen)\n\t\n\tsetupAxes(qualityCodes3x3Fail)\n\tqualityCodes3x3Fail.SetLineWidth(3)\n\tqualityCodes3x3Fail.SetLineStyle(8)\n\tqualityCodes3x3Fail.SetLineColor(colorRwthTuerkis)\n\t\n\tsetupAxes(qualityCodes5x5)\n\tqualityCodes5x5.SetLineWidth(3)\n\tqualityCodes5x5.SetLineColor(colorRwthRot)\n\t\n\tsetupAxes(qualityCodes5x5Fail)\n\tqualityCodes5x5Fail.SetLineWidth(3)\n\tqualityCodes5x5Fail.SetLineColor(colorRwthMagenta)\n\t\n\tqualityCodes.Scale(1/qualityCodes.Integral())\n\tqualityCodes3x3.Scale(1/qualityCodes3x3.Integral())\n\tqualityCodes3x3Fail.Scale(1/qualityCodes3x3Fail.Integral())\n\tqualityCodes5x5.Scale(1/qualityCodes5x5.Integral())\n\tqualityCodes5x5Fail.Scale(1/qualityCodes5x5Fail.Integral())\n\tqualityCodesFail.Scale(1/qualityCodesFail.Integral())\n\t\n\tqualityCodes.Draw()\n\tqualityCodesFail.Draw('same')\n\tqualityCodes3x3.Draw('same')\n\tqualityCodes3x3Fail.Draw('same')\n#\tqualityCodes5x5.Draw('same')\n#\tqualityCodes5x5Fail.Draw('same')\n\t\n\tlegend = TLegend(0.1,0.6,0.3,0.9)\n\tlegend.AddEntry(qualityCodes,\"Central\",\"l\")\n\tlegend.AddEntry(qualityCodesFail,\"Central Fail\",\"l\")\n\tlegend.AddEntry(qualityCodes3x3,\"3x3\",\"l\")\n\tlegend.AddEntry(qualityCodes3x3Fail,\"3x3 Fail\",\"l\")\n\tlegend.SetFillColor(0)\n#\tlegend.AddEntry(qualityCodes5x5,\"5x5\",\"l\")\n#\tlegend.AddEntry(qualityCodes5x5Fail,\"5x5 
Fail\",\"l\")\n\tlegend.Draw()\n\t\n\tlabel = drawLabelCmsPrivateSimulation()\n\tc.Update()\n\t\n\tc.SaveAs('plots/efficiency/qualityCodes.pdf')\n\t\n\treturn c,qualityCodes,label,qualityCodesFail,qualityCodes3x3,qualityCodes3x3Fail,qualityCodes5x5,qualityCodes5x5Fail,legend\n\ndef plotQualityCodesStacked(gridSize):\n\tgridString = gridSizeDict.get(gridSize)\n\tc = TCanvas('cQualityCodes' + gridString + 'Stacked','Stacked QC ' + gridString,600,0,800,600)\n\tc.SetLogy()\n\tc.cd().SetBottomMargin(0.15)\n\tc.cd().SetRightMargin(0.20)\n\tqualityCodes = fileHandler.getHistogram('multiplicity/L1MuonQualityCodes' + gridString + '_Multiplicity')\n\tqualityCodesFail = fileHandler.getHistogram('multiplicity/L1MuonQualityCodes' + gridString + 'Fail_Multiplicity')\n\t\n\tcountQualityCodes = fileHandler.getHistogram('multiplicity/L1MuonAllQualityCodes_Multiplicity')\n\tcountQualityCodesTruth = fileHandler.getHistogram('multiplicity/L1MuonTruthAllQualityCodes_Multiplicity')\n\t\n\tprint\n\tcli.output('Sanity check for quality code counts')\n\tfor i in range(1,8):\n\t\tnTotalHistogram = countQualityCodes.GetBinContent(countQualityCodes.FindBin(i))\n\t\tnFail = qualityCodesFail.GetBinContent(qualityCodesFail.FindBin(i))\n\t\tnPass = qualityCodes.GetBinContent(qualityCodes.FindBin(i))\n\t\tnSummed = nFail + nPass\n\t\tprint\n\t\tcli.output('NTotal: %d\\t\\tNSummed: %d' % (nTotalHistogram,nSummed))\n\t\tcli.output('Sanity check: %s'% (CliColors.OKBLUE + 'OK' + CliColors.ENDC if nTotalHistogram == nSummed else CliColors.FAIL + 'FAIL' + CliColors.ENDC) )\n\t\tprint\n\t\tif nTotalHistogram:\n\t\t\tqualityCodes.SetBinContent(qualityCodes.FindBin(i),nPass/float(nTotalHistogram))\n\t\t\tqualityCodesFail.SetBinContent(qualityCodesFail.FindBin(i),nFail/float(nTotalHistogram))\n\t\n\tstack = THStack(\"hstack\",\"Quality Codes in matching to HO (\" + gridString + \");;rel. fraction\")\n\t\n\tqualityCodes.SetLineColor(colorRwthDarkBlue)\n\tqualityCodes.SetFillColor(colorRwthDarkBlue)\n\tqualityCodes.SetFillStyle(3002)\n\n\tqualityCodesFail.SetFillColor(colorRwthMagenta)\n\tqualityCodesFail.SetLineColor(colorRwthMagenta)\n\tqualityCodesFail.SetFillStyle(3002)\n\n\tstack.Add(qualityCodes)\n\tstack.Add(qualityCodesFail)\n\t\n\tstack.Draw()\n\tstack.GetXaxis().SetRangeUser(0,8)\n\t\t#Label the bins with the meaning of the quality code\n\tfor i in range(1,8):\n\t\tstack.GetXaxis().SetBinLabel(stack.GetXaxis().FindBin(i),qualityCodeDict.get(i))\n\t\t\n\tlegend = TLegend(0.82,0.75,0.99,0.9)\n\tlegend.AddEntry(qualityCodes,\"Passed\",\"f\")\n\tlegend.AddEntry(qualityCodesFail,\"Failed\",\"f\")\n\tlegend.Draw()\n\t\n\tlabel = drawLabelCmsPrivateSimulation(x1ndc=0.5,y1ndc=0.9,x2ndc=0.8,y2ndc=0.93)\n\t\n\tsetupAxes(stack)\n\t\t\n\tc.Update()\n\t\n\tc.SaveAs('plots/efficiency/qualityCodesStacked' + gridString + '.pdf')\n\t\n\treturn stack,c,qualityCodes,qualityCodesFail,legend,label\n\ndef createPlotPtVsQualityCode(gridSize):\n\tsourceHistogramsForGrid = {\n\t\t1:'correlation/L1MuonpTvsQCCentralFail',\n\t\t2:'correlation/L1MuonpTvsQC3x3Fail',\n\t\t3:'correlation/L1MuonpTvsQC5x5Fail'\n\t}\n\thistogramTitleDict = {\n\t\t1:'Central',\n\t\t2:'3x3',\n\t\t3:'5x5'\n\t}\n\thistogram = fileHandler.getHistogram(sourceHistogramsForGrid.get(gridSize))\n\ttitle = 'p_{T} vs. 
rejected QC (' + histogramTitleDict.get(gridSize) + ')'\n\tcanvasTitle = 'cPtVsQualityCodes' + histogramTitleDict.get(gridSize)\n\t\n\tcanvas = TCanvas(canvasTitle,'cPtVsQualityCodes' + histogramTitleDict.get(gridSize),800,0,800,600)\n\tcanvas.SetLogz()\n\t\t\n\thistogram.GetXaxis().SetRangeUser(0,8)\n\thistogram.GetYaxis().SetRangeUser(-1,160)\n\thistogram.SetStats(0)\n\thistogram.SetTitle(title)\n\t\n\thistogram.Scale(1,'width')\n\t\n\thistogram.Draw('colz')\n\t\n\tlabel = drawLabelCmsPrivateSimulation()\n\tcanvas.Update()\n\t\n\tsetupPalette(histogram)\n\tcanvas.Update()\n\t\n\tfileNameTrunk = 'plots/efficiency/ptVsQualityCode' + histogramTitleDict.get(gridSize) + 'Fail'\n\t\n\tcanvas.SaveAs(fileNameTrunk + '.png')\n\tcanvas.SaveAs(fileNameTrunk + '.pdf')\n\t\n\thistogram.Draw('lego2')\n\tcanvas.Update()\t\t\n\t\t\n\tcanvas.SaveAs(fileNameTrunk + '3D.png')\n\tcanvas.SaveAs(fileNameTrunk + '3D.pdf')\n\t\n\treturn canvas,label,histogram\n\ndef plotQualityCodesVsPt():\n\tallPlots = []\n\n\tallPlots.append(createPlotPtVsQualityCode(1))\n\tallPlots.append(createPlotPtVsQualityCode(2))\n\tallPlots.append(createPlotPtVsQualityCode(3))\n\t\n\tcanvas = TCanvas('allQCCodes',\"All QC\")\n\tcanvas.SetLogy()\n\tcanvas.cd().SetBottomMargin(0.15)\n\n\thistAllCodes = fileHandler.getHistogram('multiplicity/L1MuonAllQualityCodes_Multiplicity')\n\thistAllCodesTruth = fileHandler.getHistogram('multiplicity/L1MuonTruthAllQualityCodes_Multiplicity')\n\n\tsetupAxes(histAllCodes)\n\n\thistAllCodes.SetLineWidth(3)\n\thistAllCodesTruth.SetLineWidth(3)\n\thistAllCodes.SetLineColor(colorRwthDarkBlue)\n\thistAllCodes.Scale(1/histAllCodes.Integral())\n\thistAllCodes.GetXaxis().SetRangeUser(0,8)\t\n\thistAllCodes.SetStats(0)\n\thistAllCodes.SetTitle('L1 muon quality codes;;rel. fraction')\n\t#Label the bins with the meaning of the quality code\n\tfor i in range(1,8):\n\t\thistAllCodes.GetXaxis().SetBinLabel(histAllCodes.GetXaxis().FindBin(i),qualityCodeDict.get(i))\n\t\n\thistAllCodes.Draw()\n\t\n\thistAllCodesTruth.Scale(1/histAllCodesTruth.Integral())\n\thistAllCodesTruth.SetLineColor(colorRwthMagenta)\n\thistAllCodesTruth.Draw('Same')\n\t\n\tlabel = drawLabelCmsPrivateSimulation()\n\t\n\tlegend = TLegend(0.1,0.75,0.3,0.9)\n\tlegend.AddEntry(histAllCodes,\"All L1\",\"l\")\n\tlegend.AddEntry(histAllCodesTruth,\"L1 Truth\",\"l\")\n\tlegend.Draw()\n\t\n\tcanvas.Update()\n\tcanvas.SaveAs('plots/efficiency/allQualityCodes.pdf')\n\t\n\tallPlots.append([histAllCodes,histAllCodesTruth,canvas,legend,label])\n\t\n\treturn allPlots\n","repo_name":"Kuenni/MCHAMMER","sub_path":"python/efficiency/QualityCodes.py","file_name":"QualityCodes.py","file_ext":"py","file_size_in_byte":8861,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"31362111128","text":"##############################################################################\r\n# server.py\r\n##############################################################################\r\n\r\nimport socket\r\nimport chatlib\r\nimport select\r\nimport random\r\nimport requests\r\n\r\n# GLOBALS\r\nusers = {}\r\nquestions = {}\r\nlogged_users = {} # a dictionary of client hostnames to usernames - will be used later\r\n\r\nERROR_MSG = \"Error! 
\"\r\nSERVER_PORT = 5678\r\nSERVER_IP = \"127.0.0.1\"\r\n\r\nclient_socket_list = []\r\nmessages_to_send = []\r\n\r\n# HELPER SOCKET METHODS\r\n\r\ndef build_and_send_message(conn, code, data):\r\n\tglobal messages_to_send\r\n\t\"\"\"\r\n\tBuilds a new message using chatlib, wanted code and message. \r\n\tPrints debug info, then sends it to the given socket.\r\n\tParamaters: conn (socket object), code (str), data (str)\r\n\tReturns: Nothing\r\n\t\"\"\"\r\n\ttry:\r\n\t\tbuilt_message = chatlib.build_message(code, data)\r\n\t\tmessages_to_send.append((conn, built_message))\r\n\t\tprint(\"[SERVER] \", built_message) # Debug print\r\n\texcept Exception as e:\r\n\t\tprint(f'{e} was was raised!')\r\n\t\tquit()\r\n\r\ndef recv_message_and_parse(conn):\r\n\t\"\"\"\r\n\tRecieves a new message from given socket,\r\n\tthen parses the message using chatlib.\r\n\tParamaters: conn (socket object)\r\n\tReturns: cmd (str) and data (str) of the received message. \r\n\tIf error occured, will return None, None\r\n\t\"\"\"\r\n\ttry:\r\n\t\tfull_msg = conn.recv(1024).decode()\r\n\texcept:\r\n\t\treturn 'None', '#'\r\n\tcmd, data = chatlib.parse_message(full_msg)\r\n\tprint(\"[CLIENT] \",full_msg)\t # Debug print\r\n\treturn cmd, data\r\n\r\n\r\n# Data Loaders #\r\n\r\ndef load_questions():\r\n\t\"\"\"\r\n\tLoads questions bank from https://opentdb.com/api.php?amount=50&type=multiple (50 at a time.)\r\n\tRecieves: -\r\n\tReturns: questions dictionary\r\n\t\"\"\"\r\n\turl = 'https://opentdb.com/api.php?amount=50&type=multiple'\r\n\tr = requests.get(url)\r\n\tif r.status_code == 200: #status code 200 (hopefully)\r\n\t\tall_questions_dict = r.json()[\"results\"]\r\n\t\tenumerated_questions_dict = {}\r\n\t\tfor index, pair in enumerate(all_questions_dict):\r\n\t\t\tenumerated_questions_dict[index] = pair\r\n\t\treturn enumerated_questions_dict\r\n\telse:\r\n\t\tprint(f'[SERVER]!! 
def load_questions():\r\n\t\"\"\"\r\n\tLoads questions bank from https://opentdb.com/api.php?amount=50&type=multiple (50 at a time.)\r\n\tReceives: -\r\n\tReturns: questions dictionary\r\n\t\"\"\"\r\n\turl = 'https://opentdb.com/api.php?amount=50&type=multiple'\r\n\tr = requests.get(url)\r\n\tif r.status_code == 200: #status code 200 (hopefully)\r\n\t\tall_questions_dict = r.json()[\"results\"]\r\n\t\tenumerated_questions_dict = {}\r\n\t\tfor index, pair in enumerate(all_questions_dict):\r\n\t\t\tenumerated_questions_dict[index] = pair\r\n\t\treturn enumerated_questions_dict\r\n\telse:\r\n\t\tprint(f'[SERVER]!! Server could not start since no questions were able to be loaded, HTTP ERR CODE {r.status_code}')\r\n\t\tquit()\r\n\r\ndef load_user_database():\r\n\t\"\"\"\r\n\tLoads users list from file\t## FILE SUPPORT TO BE ADDED LATER\r\n\tReceives: -\r\n\tReturns: user dictionary\r\n\t\"\"\"\r\n\tusers = {\r\n\t\t\t\"test\"\t\t:\t{\"password\":\"test\",\"score\":0,\"questions_asked\":[]},\r\n\t\t\t\"yossi\"\t\t:\t{\"password\":\"123\",\"score\":50,\"questions_asked\":[]},\r\n\t\t\t\"master\"\t:\t{\"password\":\"master\",\"score\":200,\"questions_asked\":[]}\r\n\t\t\t}\r\n\treturn users\r\n\r\n# SOCKET CREATOR\r\n\r\ndef setup_socket():\r\n\t\"\"\"\r\n\tCreates new listening socket and returns it\r\n\tReceives: -\r\n\tReturns: the socket object\r\n\t\"\"\"\r\n\tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\tsock.bind((SERVER_IP, SERVER_PORT))\r\n\tsock.listen()\r\n\treturn sock\r\n\r\n\r\ndef send_error(conn, error_msg):\r\n\t\"\"\"\r\n\tSend error message with given message\r\n\tReceives: socket, message error string from called function\r\n\tReturns: None\r\n\t\"\"\"\r\n\tbuild_and_send_message(conn, chatlib.PROTOCOL_SERVER['err_msg'], error_msg)\r\n\r\ndef create_random_question():\r\n\tselect_question_number = random.choice(list(questions.keys()))\r\n\tall_answer_options = [questions[select_question_number][\"correct_answer\"]] + questions[select_question_number]['incorrect_answers']\r\n\trandom.shuffle(all_answer_options)\r\n\treturn str(select_question_number) + '#' + questions[select_question_number][\"question\"] + '#' + ('#'.join(all_answer_options).replace(' ', '_')).replace('&quot;', ' ')\r\n\r\n
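# Illustration only (these concrete values are invented): for question id 7 with\r\n# text \"Where is Big Ben?\" and shuffled options London/Paris/Moscow/Athens,\r\n# create_random_question() would return\r\n# \"7#Where is Big Ben?#London#Paris#Moscow#Athens\"; a multi-word option like\r\n# \"New York\" would be sent as \"New_York\", since spaces inside options are\r\n# swapped for underscores before transmission.\r\n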
def handle_logged_message(conn):\r\n\tlogged_users_msg = \"#\".join(logged_users.values())\r\n\tbuild_and_send_message(conn, chatlib.PROTOCOL_SERVER[\"logged_list\"], logged_users_msg)\r\n\r\ndef handle_highscore_message(conn):\r\n\thigh_score_msg = \"#\".join(f'{user}:{users[user][\"score\"]}' for user in users)\r\n\tbuild_and_send_message(conn, chatlib.PROTOCOL_SERVER[\"highscore_res\"], high_score_msg)\r\n\r\n\r\ndef handle_getscore_message(conn, username):\r\n\tscore = str(users[username][\"score\"])\r\n\tbuild_and_send_message(conn, chatlib.PROTOCOL_SERVER[\"score_res\"], score)\r\n\r\n\r\ndef handle_logout_message(conn):\r\n\t\"\"\"\r\n\tCloses the given socket and removes the user from the logged_users dictionary\r\n\tReceives: socket\r\n\tReturns: None\r\n\t\"\"\"\r\n\tglobal logged_users\r\n\tglobal client_socket_list\r\n\tdel logged_users[conn.getpeername()]\r\n\tclient_socket_list.remove(conn)\r\n\tconn.close()\r\n\r\n\r\ndef handle_login_message(conn, data):\r\n\t\"\"\"\r\n\tGets socket and message data of a login message. Checks that the user exists and the password matches.\r\n\tIf not - sends an error and finishes. If all is OK, sends an OK message and adds the user and address to logged_users.\r\n\tReceives: socket, message code and data\r\n\tReturns: None (sends answer to client)\r\n\t\"\"\"\r\n\tglobal users # This is needed to access the same users dictionary from all functions\r\n\tglobal logged_users\r\n\tusername = data.split('#')[0]\r\n\tpassword = data.split('#')[1]\r\n\tif username in users.keys():\r\n\t\tif users[username]['password'] == password:\r\n\t\t\t# keep the stored user record as-is so the score persists across logins\r\n\t\t\tlogged_users[conn.getpeername()] = username\r\n\t\t\tbuild_and_send_message(conn, chatlib.PROTOCOL_SERVER[\"login_ok_msg\"], \"\")\r\n\t\telse:\r\n\t\t\tsend_error(conn, \"Wrong password.\")\r\n\telse:\r\n\t\tsend_error(conn, \"Unable to find username.\")\r\n\r\n\r\ndef handle_client_message(conn, cmd, data):\r\n\t\"\"\"\r\n\tGets message code and data and calls the right function to handle the command\r\n\tReceives: socket, message code and data\r\n\tReturns: None\r\n\t\"\"\"\r\n\tglobal logged_users\r\n\tif cmd == chatlib.PROTOCOL_CLIENT['login_msg']:\r\n\t\thandle_login_message(conn, data)\r\n\telif cmd == chatlib.PROTOCOL_CLIENT['logout_msg'] and conn.getpeername() in logged_users.keys():\r\n\t\thandle_logout_message(conn)\r\n\telif cmd == chatlib.PROTOCOL_CLIENT['score'] and conn.getpeername() in logged_users.keys():\r\n\t\thandle_getscore_message(conn, logged_users[conn.getpeername()]) # the username comes from logged_users, not from the message data\r\n\telif cmd == chatlib.PROTOCOL_CLIENT['highscore'] and conn.getpeername() in logged_users.keys():\r\n\t\thandle_highscore_message(conn)\r\n\telif cmd == chatlib.PROTOCOL_CLIENT['users'] and conn.getpeername() in logged_users.keys():\r\n\t\thandle_logged_message(conn)\r\n\telif cmd == chatlib.PROTOCOL_CLIENT['play'] and conn.getpeername() in logged_users.keys():\r\n\t\thandle_question_message(conn)\r\n\telif cmd == chatlib.PROTOCOL_CLIENT['user_answer'] and conn.getpeername() in logged_users.keys():\r\n\t\thandle_answer_message(conn, data)\r\n\telse:\r\n\t\tbuild_and_send_message(conn, chatlib.PROTOCOL_SERVER['err_msg'], 'Invalid choice.')\r\n\r\n
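# Minimal client-side sketch for manual testing (illustration only, not part of the server; it assumes chatlib exposes the same build_message/parse_message helpers and PROTOCOL_CLIENT codes used above):\r\n#\r\n#     import socket\r\n#     import chatlib\r\n#     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n#     sock.connect((SERVER_IP, SERVER_PORT))\r\n#     sock.send(chatlib.build_message(chatlib.PROTOCOL_CLIENT['login_msg'], 'test#test').encode())\r\n#     print(chatlib.parse_message(sock.recv(1024).decode()))\r\n\r\n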
def main():\r\n\t# Initializes the global users and questions dictionaries using the load functions\r\n\tglobal users\r\n\tglobal questions\r\n\tglobal messages_to_send\r\n\tusers = load_user_database()\r\n\tquestions = load_questions()\r\n\tprint(\"Welcome to Trivia Server!\")\r\n\tserver_socket = setup_socket()\r\n\twhile True:\r\n\t\tready_to_read, ready_to_write, in_err = select.select(\r\n\t\t\t[server_socket] + client_socket_list,\r\n\t\t\tclient_socket_list,\r\n\t\t\t[]\r\n\t\t)\r\n\t\tfor current_socket in ready_to_read:\r\n\t\t\tif current_socket == server_socket:\r\n\t\t\t\t(client_socket, client_address) = server_socket.accept()\r\n\t\t\t\tprint(f'[SERVER] New connection from: {client_address[0]}')\r\n\t\t\t\tclient_socket_list.append(client_socket)\r\n\t\t\telse:\r\n\t\t\t\tcmd, data = recv_message_and_parse(current_socket)\r\n\t\t\t\tif data == '#' or data is None:\r\n\t\t\t\t\t# recv failed or the client disconnected - treat it as a logout\r\n\t\t\t\t\thandle_client_message(current_socket, 'LOGOUT', '')\r\n\t\t\t\telse:\r\n\t\t\t\t\thandle_client_message(current_socket, cmd, data)\r\n\t\t\tfor msg in messages_to_send[:]: # iterate over a copy since items are removed inside the loop\r\n\t\t\t\tthe_socket_obj, data_to_send = msg # the_socket_obj => client socket\r\n\t\t\t\tif the_socket_obj in ready_to_write:\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tthe_socket_obj.send(data_to_send.encode())\r\n\t\t\t\t\t\tmessages_to_send.remove(msg)\r\n\t\t\t\t\texcept Exception as err:\r\n\t\t\t\t\t\tprint(f'[SERVER]!! {err} - SOMETHING WENT WRONG BUT THE SERVER IS STILL RUNNING...')\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\tmain()\r\n","repo_name":"dkiexe/campus_il","sub_path":"Network.py/final_project/server_TCP.py","file_name":"server_TCP.py","file_ext":"py","file_size_in_byte":8974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}