\":\n\t\t\t\tflag = False\n\t\t\telif '' in line:\n\t\t\t\tflag = False\n\n\n\n\n\t\twith open('tabela.html', 'w') as file:\n\t\t\tfile.write(\"
\")\n\t\t\tfile.write(final)\n\t\t\tfile.write(\"
\")\n\t\t\tfile.close()\n\t\treturn True\n\nif __name__ == '__main__':\n\tcreate_excel()","repo_name":"trosemberg/Ajuda-Rodrigo","sub_path":"aut_excel.py","file_name":"aut_excel.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"39307247527","text":"from constants import *\nfrom enum import Enum\nfrom rw import csv, geojson\nfrom caret import spatial\nfrom util import transformator\n\nimport geojson as gj\nimport geopandas as gpd\nimport pandas as pd\n\nclass StationStrategy(Enum):\n HIGH_TRAFFIC_10 = 10\n HIGH_TRAFFIC_20 = 20\n HIGH_TRAFFIC_30 = 30\n HIGH_TRAFFIC_40 = 40\n HIGH_TRAFFIC_50 = 50\n HIGH_TRAFFIC_60 = 60\n HIGH_TRAFFIC_70 = 70\n HIGH_TRAFFIC_80 = 80\n HIGH_TRAFFIC_90 = 90\n ALL = 100\n\ndef get(city, logger):\n \"\"\"Read or create the stations (geo) data frame for a city.\n\n Args:\n city (_str_): the city to get the stations data frame for\n\n Returns:\n _geopandas.GeoDataFrame_: the city's stations (geo) data frame\n \"\"\"\n \n def generate_df():\n \"\"\"Reads station coords + info from cartoradio, convert to geojson, remove stations outside the city.\n\n Returns:\n geopandas.GeoDataFrame: station dataframe with primary key `station_id`, foreign key 'tile_id', geometry + additional station info\n \"\"\"\n \n # (1) read cartoradio anntena + site csv's, join them on `support_number` and extract station info\n antennas = get_antennas(city, logger)\n support_numbers = antennas.index.to_numpy()\n sites = get_sites(city, support_numbers, logger)\n station_info = sites.join(antennas, on='support_number', how='left').reset_index()\n\n # (2) create a geojson feature list from the stations' coords\n features = [\n gj.Feature(\n geometry=gj.Point((station_info.loc[sn]['longitude'], station_info.loc[sn]['latitude'])),\n properties=station_info.loc[sn].to_dict())\n for sn in station_info.index\n ]\n\n # (3) create the (geo) dataframe\n stations = gpd.GeoDataFrame.from_features(features)\n \n # (4) get the city bounds and remove stations outside of the city\n city_bounds = spatial.get_bounds(city, logger).at[0, 'geometry']\n stations = stations[stations['geometry'].within(city_bounds)].reset_index(drop=True)\n \n # (5) add station_id column\n stations['station_id'] = pd.Series(stations.index)\n \n return stations\n\n return geojson.read_or_create(f'{BASE_DIR}/out/station/{city}.geojson', generate_df, logger)\n\ndef apply_strategy(stations, bs_strategy, all_traffic, traffic_timeslotwise):\n if bs_strategy == StationStrategy.ALL:\n return stations\n \n percentage = bs_strategy\n \n num_stations = int(percentage / 100 * len(stations.index))\n \n stations_by_traffic = transformator.Transformator(all_traffic[['station_id', 'traffic']]).groupby_sum('station_id').sort('traffic', ascending=False).reset_index().get()\n stations = stations[stations['station_id'].isin(stations_by_traffic.iloc[:num_stations]['station_id'])].reset_index(drop=True)\n \n return stations # (stations, all_traffic, [t[t['station_id'].isin(stations['station_id'])] for t in traffic_timeslotwise])\n\ndef get_antennas(city, logger):\n \"\"\"Reads antenna csv exported from cartoradio.\n\n Returns:\n pandas.DataFrame: antennas dataframe with primary key `support_number`\n \"\"\"\n \n # (1) read cartoradio antenna csv\n antennas = csv.read_cartoradio(\n f'{BASE_DIR}/data/station/cartoradio/{city}/Antennes_Emetteurs_Bandes_Cartoradio.csv',\n [\"support_number\", \"cartoradio_number\", \"station_number\", \"commissioning_date\", \"operator\", \"antenna_type\", \"antenna_number\", \"antenna_size\", \"directivity\", \"azimuth\", \"height_ground\", \"servive_type\", \"system\", \"start\", \"end\", \"unit\"],\n logger\n )\n \n # (2) remove non-LTE antennas and antennas not operated by Orange\n antennas = antennas[(antennas['operator'] == 
'ORANGE') & antennas['system'].apply(lambda x: 'LTE' in x)]\n    \n    # (3) remove irrelevant columns\n    antennas = antennas[['station_number', 'antenna_number', 'support_number']].set_index(['station_number', 'antenna_number']).drop_duplicates().reset_index().set_index('support_number')\n    \n    return antennas\n    \ndef get_sites(city, support_numbers, logger):\n    \"\"\"Reads site csv exported from cartoradio.\n\n    Args:\n        city (_str_): the city to read the sites for\n        support_numbers (_numpy.ndarray_): the support numbers of the pre-filtered antennas\n\n    Returns:\n        pandas.DataFrame: sites dataframe with primary key `support_number`\n    \"\"\"\n    # (1) read cartoradio site csv\n    sites = csv.read_cartoradio(\n        f'{BASE_DIR}/data/station/cartoradio/{city}/Sites_Cartoradio.csv',\n        [\"support_number\", \"longitude\", \"latitude\", \"position\", \"insee\", \"locality\", \"address\", \"postcode\", \"town\", \"support_type\", \"height\", \"owner\"],\n        logger\n    )\n    \n    # (2) remove non-LTE antennas and antennas not operated by Orange (based on the pre-filtered support numbers from the antennas dataframe)\n    sites = sites[sites['support_number'].apply(lambda x: x in support_numbers)].set_index('support_number')\n    \n    return sites","repo_name":"seemoo-lab/caret","sub_path":"caret/station.py","file_name":"station.py","file_ext":"py","file_size_in_byte":4976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"38788317116","text":"#!/usr/bin/env python\n\n# Libraries\nimport pandas as pd\nimport numpy as np\nimport re\nimport os\nimport nltk\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nnltk.download('punkt')\nnltk.download('stopwords')\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer\nfrom sklearn.model_selection import train_test_split\nfrom io import StringIO\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn import metrics\nfrom sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import GridSearchCV\nimport math\nimport json\nimport fasttext\nimport pickle\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# Data import\ndef data_import(filepath, col, col_new):\n\n df = pd.read_csv(filepath, encoding='latin1')\n df[col_new] = df[col]\n df[col_new] = df[col_new].astype(str)\n\n return df\n\n# ext Cleaning\ndef preprocess(x, col, label):\n '''tokenize and normalize'''\n stop_words = set(stopwords.words('english')) \n\n # convert to dataframe\n data = pd.DataFrame({'text': x[col], 'label': x[label]})\n\n # remove html\n data[col] = data.apply(lambda t: re.sub(r'https?://\\S+|www\\.\\S+', '', str(t[col])), axis=1)\n\n # remove stopwords, number, and convert to lower case\n data[col] = data.apply(lambda r: ' '.join(w.lower() for w in r[col].split() if (w.lower() not in stop_words) & (w.isalpha())),axis=1)\n data[col] = data[data[col] != '']\n \n # discard NA reviews\n data = data.dropna()\n\n return data\n\n# TF-IDF\ndef tfidf_transform(train, test, col):\n ngram = (1,2)\n tfidf = TfidfVectorizer(sublinear_tf=True, min_df=5,ngram_range=ngram, stop_words='english')\n tfidf.fit_transform(train[col].values)\n\n # We transform each text into a vector\n vec_train = tfidf.transform(train[col].values)\n vec_test = tfidf.transform(test[col].values)\n\n # save best performing svm model\n with open('model/tfidf_vec.pkl', 'wb') as f:\n pickle.dump(tfidf, f)\n\n return vec_train, vec_test\n\n\ndef main():\n train = data_import(\"data/Corona_NLP_train.csv\", 'OriginalTweet', 'text')\n test = data_import(\"data/Corona_NLP_test.csv\", 'OriginalTweet','text')\n train_new = preprocess(train, 'text', 'Sentiment')\n test_new = preprocess(test, 'text', 'Sentiment')\n\n X_train, X_test = tfidf_transform(train_new, test_new, 'text')\n y_train = train_new.label.values\n y_test = test_new.label.values\n \n\n print('Train models!!!!')\n # Logistic\n\n lr1 = LogisticRegression(random_state=66,solver='lbfgs') # fit logistic\n lr1.fit(X_train, y_train)\n y_pred = lr1.predict(X_test) # predict\n\n # evaluation metrics\n print('Logistic model 1:')\n print(\"Accuracy: %0.4f\"%accuracy_score(y_test, y_pred))\n print(\"Micro-averaged F1 score: %0.4f\"%f1_score(y_test, y_pred, average='micro'))\n\n lr2 = LogisticRegression(random_state=66, C=15, penalty='l2',solver='lbfgs') # fit logistic\n lr2.fit(X_train, y_train)\n y_pred = lr2.predict(X_test) # predict\n\n # evaluation metrics\n print('Logistic model 2:')\n print(\"Accuracy: %0.4f\"%accuracy_score(y_test, y_pred))\n print(\"Micro-averaged F1 score: %0.4f\"%f1_score(y_test, y_pred, average='micro'))\n\n lr3 = LogisticRegression(random_state=66, C=10, penalty='l2',solver='lbfgs')\n lr3.fit(X_train, y_train)\n y_pred = 
lr3.predict(X_test) # predict\n\n # evaluation metrics\n print('Logistic model 3:')\n print(\"Accuracy: %0.4f\"%accuracy_score(y_test, y_pred))\n print(\"Micro-averaged F1 score: %0.4f\"%f1_score(y_test, y_pred, average='micro'))\n\n lr4 = LogisticRegression(random_state=66, C=15, penalty='l2',solver='liblinear')\n lr4.fit(X_train, y_train)\n y_pred = lr4.predict(X_test) # predict\n\n # evaluation metrics\n print('Logistic model 4:')\n print(\"Accuracy: %0.4f\"%accuracy_score(y_test, y_pred))\n print(\"Micro-averaged F1 score: %0.4f\"%f1_score(y_test, y_pred, average='micro'))\n\n lr5 = LogisticRegression(random_state=66, C=2, penalty='l1',solver='liblinear')\n lr5.fit(X_train, y_train)\n y_pred = lr5.predict(X_test) # predict\n\n # evaluation metrics\n print('Logistic model 5:')\n print(\"Accuracy: %0.4f\"%accuracy_score(y_test, y_pred))\n print(\"Micro-averaged F1 score: %0.4f\"%f1_score(y_test, y_pred, average='micro'))\n\n\n # fasttext\n\n # fasttext requires data to be in the format of: __label__1 text\n train_fasttext = train_new.apply(lambda t: '__label__' + str(t['label']) + ' ' + str(t['text']), axis=1)\n test_fasttext = test_new.apply(lambda t: '__label__' + str(t['label']) + ' ' + str(t['text']), axis=1)\n train_fasttext.to_csv('fasttext_train.txt',index=False, header=False)\n test_fasttext.to_csv('fasttext_test.txt',index=False, header=False)\n\n # fasttext model - default\n ft_model1 = fasttext.train_supervised('fasttext_train.txt')\n\n # calculate evaluation metrics\n result = ft_model1.test('fasttext_test.txt')\n precision = result[1]\n recall = result[2]\n print('Fasttext model 1:')\n print(\"F1 score: %0.4f\"%(2*precision*recall/(precision+recall)))\n\n # fasttext model - setting 1\n ft_model2 = fasttext.train_supervised('fasttext_train.txt',wordNgrams=2)\n result = ft_model2.test('fasttext_test.txt')\n precision = result[1]\n recall = result[2]\n print('Fasttext model 2:')\n print(\"F1 score: %0.4f\"%(2*precision*recall/(precision+recall)))\n\n # fasttext model - setting 2\n ft_model3 = fasttext.train_supervised('fasttext_train.txt',lr=0.2, wordNgrams=2)\n result = ft_model3.test('fasttext_test.txt')\n precision = result[1]\n recall = result[2]\n print('Fasttext model 3:')\n print(\"F1 score: %0.4f\"%(2*precision*recall/(precision+recall)))\n\n # fasttext model - setting 3\n ft_model4 = fasttext.train_supervised('fasttext_train.txt', lr=0.5, wordNgrams=2)\n result = ft_model4.test('fasttext_test.txt')\n precision = result[1]\n recall = result[2]\n print('Fasttext model 4:')\n print(\"F1 score: %0.4f\"%(2*precision*recall/(precision+recall)))\n\n\n # SVM\n\n svm1 = LinearSVC(random_state=66)\n svm1.fit(X_train, y_train)\n y_pred = svm1.predict(X_test)\n \n print('SVM model 1:')\n print(\"Accuracy: %0.4f\"%accuracy_score(y_test, y_pred))\n print(\"Micro-averaged F1 score: %0.4f\"%f1_score(y_test, y_pred, average='micro'))\n\n svm2 = LinearSVC(random_state=66, penalty='l2', C=10, loss='hinge')\n svm2.fit(X_train, y_train)\n y_pred = svm2.predict(X_test)\n \n print('SVM model 2:')\n print(\"Accuracy: %0.4f\"%accuracy_score(y_test, y_pred))\n print(\"Micro-averaged F1 score: %0.4f\"%f1_score(y_test, y_pred, average='micro'))\n\n svm3 = LinearSVC(random_state=66, penalty='l2', loss='squared_hinge', dual=False)\n svm3.fit(X_train, y_train)\n y_pred = svm3.predict(X_test)\n \n print('SVM model 3:')\n print(\"Accuracy: %0.4f\"%accuracy_score(y_test, y_pred))\n print(\"Micro-averaged F1 score: %0.4f\"%f1_score(y_test, y_pred, average='micro'))\n\n svm4 = LinearSVC(random_state=66, 
penalty='l1', loss='squared_hinge', dual=False)\n    svm4.fit(X_train, y_train)\n    y_pred = svm4.predict(X_test)\n    \n    print('SVM model 4:')\n    print(\"Accuracy: %0.4f\"%accuracy_score(y_test, y_pred))\n    print(\"Micro-averaged F1 score: %0.4f\"%f1_score(y_test, y_pred, average='micro'))\n\n\n    # Random Forest\n\n    max_depth = [10,30,50]\n    n_estimators = [200,500]\n    grid_params = {'max_depth':max_depth,'n_estimators':n_estimators}\n\n    RandomForest_model = GridSearchCV(RandomForestClassifier(class_weight = 'balanced'), grid_params,\n                            scoring = 'accuracy', cv=5,n_jobs=-1, return_train_score=True)\n    RandomForest_model.fit(X_train, y_train)\n\n    results = pd.DataFrame.from_dict(RandomForest_model.cv_results_)\n    print(RandomForest_model.best_estimator_)\n\n\n    RandomForest_model = RandomForestClassifier(bootstrap=True, class_weight='balanced',\n                            max_depth=50, n_estimators=200, random_state=66, verbose=0)\n    RandomForest_model.fit(X_train,y_train)\n\n    y_pred = RandomForest_model.predict(X_test)\n    print('Random Forest model:')\n    print(\"Accuracy: %0.4f\"%accuracy_score(y_test, y_pred))\n    print(\"Micro-averaged F1 score: %0.4f\"%f1_score(y_test, y_pred, average='micro'))\n\n    # Store best model (fasttext model 1)\n    ft_model1.save_model('model/fasttext_model')\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"zekaizhu/Tweet-Text-Sentiment-Classification","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8532,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"27085068614","text":"import argparse\nimport logging\nimport math\nimport os\nimport random\nfrom functools import partial\nfrom packaging import version\n\n# Import from third party libraries\nimport datasets\nimport torch\nimport torch.nn.functional as F\nfrom datasets import load_dataset\nfrom datasets import load_metric\nfrom torch.utils.data import DataLoader\nfrom tqdm.auto import tqdm\n\nimport wandb\nimport transformers\nfrom transformers.models.bart.modeling_bart import shift_tokens_right\nfrom transformers import BartForConditionalGeneration, BartTokenizer\n\n# Imports from our module\nfrom transformer_mt import utils\n\n\n# Setup logging\nlogger = logging.getLogger(__file__)\nlogging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO,\n)\n\ndatasets.utils.logging.set_verbosity_warning()\ntransformers.utils.logging.set_verbosity_warning()\n\n\n# To compute BLEU we will use Huggingface Datasets implementation of it\n# Sacrebleu is a flavor of BLEU that standardizes some of the BLEU parameters.\nbleu = datasets.load_metric(\"sacrebleu\")\n\n\ndef parse_args():\n \"\"\"This function creates argument parser and parses the scrip input arguments.\n This is the most common way to define input arguments in python.\n\n To change the parameters, pass them to the script, for example:\n\n python cli/train.py \\\n --source_lang en \\\n --target_lang es \\\n --output_dir output_dir \\\n --weight_decay 0.01\n \n DO NOT MODIFY THIS FUNCTION\n This is not only restricted for this homework, but also a generally bad practice.\n Default arguments have the meaning of being a reasonable default value, not of the last arguments used.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Train machine translation transformer model\")\n\n # Required arguments\n parser.add_argument(\n \"--output_dir\",\n type=str,\n required=True,\n help=(\"Where to store the final model. \"\n \"Should contain the source and target tokenizers in the following format: \"\n \"output_dir/{source_lang}_tokenizer and output_dir/{target_lang}_tokenizer. \"\n \"Both of these should be directories containing tokenizer.json files.\"\n ),\n )\n # Data arguments\n parser.add_argument(\n \"--dataset_name\",\n type=str,\n default=\"stas/wmt14-en-de-pre-processed\",\n help=\"The name of the dataset to use (via the datasets library).\",\n )\n parser.add_argument(\n \"--dataset_config_name\",\n type=str,\n default=\"en-de\",\n help=(\"Many datasets in Huggingface Dataset repository have multiple versions or configs. \"\n \"For the case of machine translation these usually indicate the language pair like \"\n \"en-es or zh-fr or similar. 
To look up possible configs of a dataset, \"\n \"find it on huggingface.co/datasets.\"),\n )\n parser.add_argument(\n \"--debug\",\n default=False,\n action=\"store_true\",\n help=\"Whether to use a small subset of the dataset for debugging.\",\n )\n # Model arguments\n parser.add_argument(\n \"--num_layers\",\n default=6,\n type=int,\n help=\"Number of hidden layers in the Transformer encoder\",\n )\n parser.add_argument(\n \"--hidden_size\",\n default=512,\n type=int,\n help=\"Hidden size of the Transformer encoder\",\n )\n parser.add_argument(\n \"--num_heads\",\n default=8,\n type=int,\n help=\"Number of attention heads in the Transformer encoder\",\n )\n parser.add_argument(\n \"--fcn_hidden\",\n default=2048,\n type=int,\n help=\"Hidden size of the FCN\",\n )\n parser.add_argument(\n \"--max_seq_length\",\n type=int,\n default=128,\n help=\"The maximum total sequence length for source and target texts after \"\n \"tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.\"\n \"during ``evaluate`` and ``predict``.\",\n )\n parser.add_argument(\n \"--preprocessing_num_workers\",\n type=int,\n default=8,\n help=\"The number of processes to use for the preprocessing.\",\n )\n parser.add_argument(\n \"--overwrite_cache\",\n type=bool,\n default=None,\n help=\"Overwrite the cached training and evaluation sets\",\n )\n\n # Training arguments\n parser.add_argument(\n \"--device\",\n default=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n help=\"Device (cuda or cpu) on which the code should run\",\n )\n parser.add_argument(\n \"--batch_size\",\n type=int,\n default=8,\n help=\"Batch size (per device) for the training dataloader.\",\n )\n parser.add_argument(\n \"--learning_rate\",\n type=float,\n default=5e-5,\n help=\"Initial learning rate (after the potential warmup period) to use.\",\n )\n parser.add_argument(\n \"--weight_decay\",\n type=float,\n default=0.0,\n help=\"Weight decay to use.\",\n )\n parser.add_argument(\n \"--dropout_rate\",\n default=0.1,\n type=float,\n help=\"Dropout rate of the Transformer encoder\",\n )\n parser.add_argument(\n \"--num_train_epochs\",\n type=int,\n default=1,\n help=\"Total number of training epochs to perform.\",\n )\n parser.add_argument(\n \"--eval_every_steps\",\n type=int,\n default=5000,\n help=\"Perform evaluation every n network updates.\",\n )\n parser.add_argument(\n \"--logging_steps\",\n type=int,\n default=10,\n help=\"Compute and log training batch metrics every n steps.\",\n )\n parser.add_argument(\n \"--max_train_steps\",\n type=int,\n default=None,\n help=\"Total number of training steps to perform. If provided, overrides num_train_epochs.\",\n )\n parser.add_argument(\n \"--lr_scheduler_type\",\n type=transformers.SchedulerType,\n default=\"linear\",\n help=\"The scheduler type to use.\",\n choices=[\"linear\", \"cosine\", \"cosine_with_restarts\", \"polynomial\", \"constant\", \"constant_with_warmup\"],\n )\n parser.add_argument(\n \"--num_warmup_steps\", type=int, default=0, help=\"Number of steps for the warmup in the lr scheduler.\"\n )\n parser.add_argument(\n \"--generation_type\",\n choices=[\"greedy\", \"beam_search\"],\n default=\"beam_search\",\n )\n parser.add_argument(\n \"--beam_size\",\n type=int,\n default=5,\n help=(\"Beam size for beam search generation. 
\"\n \"Decreasing this parameter will make evaluation much faster, \"\n \"increasing this (until a certain value) would likely improve your results.\"\n ),\n )\n parser.add_argument(\n \"--seed\",\n type=int,\n default=None,\n help=\"A seed for reproducible training.\",\n )\n parser.add_argument(\n \"--wandb_project\", \n default=\"transformer_mt\",\n help=\"wandb project name to log metrics to\"\n )\n\n args = parser.parse_args()\n\n return args\n\n\ndef preprocess_function(\n examples,\n tokenizer,\n model\n):\n inputs = examples[\"article\"]\n targets = examples[\"highlights\"]\n\n input_encodings = tokenizer.batch_encode_plus(examples[\"article\"], pad_to_max_length=True, max_length=1024, truncation=True)\n target_encodings = tokenizer.batch_encode_plus(examples[\"highlights\"], pad_to_max_length=True, max_length=1024, truncation=True)\n #print(input_encodings.shape)\n\n labels = target_encodings['input_ids']\n decoder_input_ids = labels.copy()\n #labels[labels[:, :] == model.config.pad_token_id] = -100\n labels = [[-100 if token == tokenizer.pad_token_id else token for token in l] for l in labels]\n encodings = {\n 'input_ids': input_encodings['input_ids'],\n 'attention_mask': input_encodings['attention_mask'],\n 'decoder_input_ids': decoder_input_ids,\n 'labels': labels,\n }\n\n return encodings\n\n\ndef evaluate_model(\n model,\n dataloader,\n tokenizer,\n device,\n max_seq_length,\n generation_type,\n beam_size,\n):\n n_generated_tokens = 0\n model.eval()\n for batch in tqdm(dataloader, desc=\"Evaluation\"):\n with torch.inference_mode():\n input_ids = batch[\"input_ids\"].to(device)\n labels = batch[\"labels\"].to(device)\n labels[labels == -100] = tokenizer.pad_token_id\n \n generated_tokens = model.generate(\n input_ids,\n bos_token_id=tokenizer.bos_token_id,\n eos_token_id=tokenizer.eos_token_id,\n pad_token_id=tokenizer.pad_token_id,\n num_beams=beam_size,\n )\n decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)\n decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)\n\n for pred in decoded_preds:\n n_generated_tokens += len(tokenizer(pred)[\"input_ids\"])\n\n decoded_preds, decoded_labels = utils.postprocess_text(decoded_preds, decoded_labels)\n\n bleu.add_batch(predictions=decoded_preds, references=decoded_labels)\n\n model.train()\n eval_metric = bleu.compute()\n evaluation_results = {\n \"bleu\": eval_metric[\"score\"],\n \"generation_length\": n_generated_tokens / len(dataloader.dataset),\n }\n return evaluation_results, input_ids, decoded_preds, decoded_labels\n\n\ndef main():\n # Parse the arguments\n args = parse_args()\n logger.info(f\"Starting script with arguments: {args}\")\n\n # Initialize wandb as soon as possible to log all stdout to the cloud\n wandb.init(project=\"cnn_summarization\", config=args)\n\n ###############################################################################\n # Part 1: Load the data\n ###############################################################################\n\n # Make sure output directory exists, if not create it\n os.makedirs(args.output_dir, exist_ok=True)\n\n # Load the datasets\n raw_datasets = load_dataset('cnn_dailymail', '3.0.0')\n if \"validation\" not in raw_datasets:\n # will create \"train\" and \"test\" subsets\n # fix seed to make sure that the split is reproducible\n # note that we should use the same seed here and in create_tokenizer.py\n raw_datasets = raw_datasets[\"train\"].train_test_split(test_size=2000, seed=42)\n\n if args.debug:\n raw_datasets = 
utils.sample_small_debug_dataset(raw_datasets)\n\n ###############################################################################\n # Part 2: Create the model and load the tokenizers\n ###############################################################################\n\n tokenizer = BartTokenizer.from_pretrained(\"facebook/bart-large\")\n\n model = BartForConditionalGeneration.from_pretrained('facebook/bart-large')\n model.to(args.device)\n\n ###############################################################################\n # Part 3: Pre-process the data\n ###############################################################################\n\n # Preprocessing the datasets.\n # First we tokenize all the texts.\n column_names = raw_datasets[\"train\"].column_names\n\n preprocess_function_wrapped = partial(\n preprocess_function,\n tokenizer=tokenizer,\n model=model,\n )\n\n processed_datasets = raw_datasets.map(\n preprocess_function_wrapped,\n batched=True,\n remove_columns=column_names,\n load_from_cache_file=not args.overwrite_cache,\n desc=\"Running tokenizer on dataset\",\n )\n\n train_dataset = processed_datasets[\"train\"]\n eval_dataset = processed_datasets[\"validation\"] if \"validation\" in processed_datasets else processed_datasets[\"test\"]\n\n # Log a few random samples from the training set:\n for index in random.sample(range(len(train_dataset)), 2):\n logger.info(f\"Sample {index} of the training set: {train_dataset[index]}.\")\n logger.info(f\"Decoded input_ids: {tokenizer.decode(train_dataset[index]['input_ids'])}\")\n logger.info(\"\\n\")\n\n ###############################################################################\n # Part 4: Create PyTorch dataloaders that handle data shuffling and batching\n ###############################################################################\n\n #collation_function_for_seq2seq_wrapped = partial(\n # collation_function_for_seq2seq,\n # source_pad_token_id=source_tokenizer.pad_token_id,\n # target_pad_token_id=target_tokenizer.pad_token_id,\n #)\n\n from transformers.data.data_collator import DataCollatorWithPadding\n\n collator = transformers.data.data_collator.DataCollatorWithPadding(tokenizer)\n\n train_dataloader = DataLoader(\n train_dataset, shuffle=True, collate_fn=collator, batch_size=args.batch_size\n )\n eval_dataloader = DataLoader(\n eval_dataset, collate_fn=collator, batch_size=args.batch_size\n )\n # YOUR CODE ENDS HERE\n\n ###############################################################################\n # Part 5: Create optimizer and scheduler\n ###############################################################################\n\n optimizer = torch.optim.AdamW(\n model.parameters(),\n lr=args.learning_rate,\n weight_decay=args.weight_decay,\n )\n\n # Scheduler and math around the number of training steps.\n num_update_steps_per_epoch = len(train_dataloader)\n if args.max_train_steps is None:\n args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n else:\n args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n lr_scheduler = transformers.get_scheduler(\n name=args.lr_scheduler_type,\n optimizer=optimizer,\n num_warmup_steps=args.num_warmup_steps,\n num_training_steps=args.max_train_steps,\n )\n\n #gradient_accumulation_steps = 32\n\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {len(train_dataset)}\")\n logger.info(f\" Num Epochs = {args.num_train_epochs}\")\n logger.info(f\" Total optimization steps = {args.max_train_steps}\")\n progress_bar = 
tqdm(range(args.max_train_steps))\n\n    # Log a pre-processed training example to make sure the pre-processing does not have bugs in it\n    # and we do not input garbage to our model.\n    batch = next(iter(train_dataloader))\n    logger.info(\"Look at the data that we input into the model, check that it looks like what we expect.\")\n    # sample two examples from the batch; len(batch) would count the dict keys, not the examples\n    for index in random.sample(range(len(batch[\"input_ids\"])), 2):\n        logger.info(f\"Decoded input_ids: {tokenizer.decode(batch['input_ids'][index])}\")\n        logger.info(f\"Decoded labels: {tokenizer.decode(batch['decoder_input_ids'][index])}\")\n        logger.info(\"\\n\")\n\n    ###############################################################################\n    # Part 6: Training loop\n    ###############################################################################\n    global_step = 0\n\n\n    # iterate over epochs\n    for epoch in range(args.num_train_epochs):\n        model.train() # make sure that model is in training mode, e.g. dropout is enabled\n\n        # iterate over batches\n        for batch in train_dataloader:\n            input_ids = batch[\"input_ids\"].to(args.device)\n            decoder_input_ids = batch[\"decoder_input_ids\"].to(args.device)\n            attention_mask = batch[\"attention_mask\"].to(args.device)\n            labels = batch[\"labels\"].to(args.device)\n\n            outputs = model(\n                input_ids,\n                decoder_input_ids=decoder_input_ids,\n                attention_mask=attention_mask,\n                labels=labels,\n            )\n\n            loss = outputs.loss\n            # backward() accumulates gradients in place and returns None, so do not reassign loss\n            loss.backward()\n            optimizer.step()\n            lr_scheduler.step()\n            optimizer.zero_grad()\n\n            wandb.log(\n                {\n                    \"train_loss\": loss.item(),\n                    \"learning_rate\": optimizer.param_groups[0][\"lr\"],\n                    \"epoch\": epoch,\n                },\n                step=global_step,\n            )\n\n            progress_bar.update(1)\n            global_step += 1\n\n            if global_step % args.logging_steps == 0:\n                # An extra training metric that might be useful for understanding\n                # how well the model is doing on the training set.\n                # Please pay attention to it during training.\n                # If the metric is significantly below 80%, there is a chance of a bug somewhere.\n                predictions = outputs.logits.argmax(-1)\n                label_nonpad_mask = labels != tokenizer.pad_token_id\n                num_words_in_batch = label_nonpad_mask.sum().item()\n\n                accuracy = (predictions == labels).masked_select(label_nonpad_mask).sum().item() / num_words_in_batch\n\n                wandb.log(\n                    {\"train_batch_word_accuracy\": accuracy},\n                    step=global_step,\n                )\n\n            if global_step % args.eval_every_steps == 0 or global_step == args.max_train_steps:\n                eval_results, last_input_ids, last_decoded_preds, last_decoded_labels = evaluate_model(\n                    model=model,\n                    dataloader=eval_dataloader,\n                    tokenizer=tokenizer,\n                    device=args.device,\n                    max_seq_length=1024,\n                    generation_type=args.generation_type,\n                    beam_size=args.beam_size,\n                )\n                # YOUR CODE ENDS HERE\n                wandb.log(\n                    {\n                        \"eval/bleu\": eval_results[\"bleu\"],\n                        \"eval/generation_length\": eval_results[\"generation_length\"],\n                    },\n                    step=global_step,\n                )\n                logger.info(\"Generation example:\")\n                random_index = random.randint(0, len(last_input_ids) - 1)\n                logger.info(f\"Input sentence: {tokenizer.decode(last_input_ids[random_index], skip_special_tokens=True)}\")\n                logger.info(f\"Generated sentence: {last_decoded_preds[random_index]}\")\n                logger.info(f\"Reference sentence: {last_decoded_labels[random_index][0]}\")\n\n                logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n                model.save_pretrained(args.output_dir)\n\n            if global_step >= args.max_train_steps:\n                break\n\n    ###############################################################################\n    # Part 8: Save the model\n    
###############################################################################\n\n    logger.info(\"Saving final model checkpoint to %s\", args.output_dir)\n    model.save_pretrained(args.output_dir)\n\n    logger.info(\"Uploading tokenizer, model and config to wandb\")\n    wandb.save(os.path.join(args.output_dir, \"*\"))\n\n    logger.info(f\"Script finished successfully, model saved in {args.output_dir}\")\n\n\nif __name__ == \"__main__\":\n    if version.parse(datasets.__version__) < version.parse(\"1.18.0\"):\n        raise RuntimeError(\"This script requires Datasets 1.18.0 or higher. Please update via pip install -U datasets.\")\n\n    main()\n","repo_name":"rvavruska/NLPProject","sub_path":"cli/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":19299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"73293718633","text":"import uuid\nimport base64\nimport re\nfrom ast import literal_eval\n\n\nclass Event:\n def __init__(self, user_id, event_name, event_description, event_location,\n event_start_time, event_end_time,\n attendee_limit, event_id=None):\n if event_id is None:\n self.generate_id()\n else:\n self.event_id = event_id\n self.user_id = user_id\n self.event_name = event_name\n self.event_description = event_description\n self.event_location = event_location\n self.event_start_time = event_start_time\n self.event_end_time = event_end_time\n self.attendee_limit = attendee_limit\n\n def generate_id(self):\n self.event_id = re.sub(\"[^0-9a-zA-Z]+\", \"\",\n str(base64.b64encode(uuid.uuid4().bytes)))\n\n def to_dict(self):\n return {\n 'event_id': self.event_id,\n 'user_id': self.user_id,\n 'event_name': self.event_name,\n 'event_description': self.event_description,\n 'location': literal_eval(self.event_location)[0].strip('\"'),\n 'lat': literal_eval(self.event_location)[1],\n 'long': literal_eval(self.event_location)[2],\n 'address': literal_eval(self.event_location)[3].strip('\"'),\n 'start_time': self.event_start_time,\n 'end_time': self.event_end_time,\n 'attendee_limit': self.attendee_limit,\n }\n","repo_name":"junyanj1/advancedSE-Project","sub_path":"app/models/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"33192901831","text":"# chương trình tìm nghiệm của phương trình bậc 3 với các hệ số a, b, c\n\n#input\n\n# ax2 + bx + c = 0, where\n# a, b and c are real numbers and\n# a ≠ 0\n\n# output\n# (-b ± (b ** 2 - 4 * a * c) ** 0.5) / 2 * a\n\n\nimport cmath\n\na = float(input(\"Nhập a: \"))\nb = float(input(\"Nhập b: \"))\nc = float(input(\"Nhập c: \"))\n\n# tính số phân biệt\nd = (b**2) - (4*a*c)\n\n# tính kết quả\nsol1 = (-b - cmath.sqrt(d))/(2*a)\nsol2 = (-b + cmath.sqrt(d))/(2*a)\n\nprint('The solution are {0} and {1}'.format(sol1,sol2))\n","repo_name":"phu68/python","sub_path":"programing_example/06_solve_quadratic_equation.py","file_name":"06_solve_quadratic_equation.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"71391602152","text":"import collections\nn,m = map(int,input().split())\n\nboard = [list(map(int,list(input()))) for _ in range(n)]\ncheck = [[[0,0]for _ in range(m)] for _ in range(n)]\ncheck[0][0][0] = 1\n\nnx = [1,0,-1,0]\nny = [0,1,0,-1]\ndef bfs(x,y,wall):\n st = collections.deque()\n st.append((x,y,wall))\n while st :\n x,y,wall = st.popleft()\n if x == n-1 and y == m-1 :\n return check[x][y][wall]\n\n for i in range(4):\n dx = x + nx[i]\n dy = y + ny[i]\n if dx < 0 or dy < 0 or dx >= n or dy >= m :\n continue\n if board[dx][dy] == 1 and wall == 0 :\n check[dx][dy][wall+1] = check[x][y][wall]+1\n st.append((dx,dy,wall+1))\n if board[dx][dy] == 0 and check[dx][dy][wall] == 0 :\n st.append((dx,dy,wall))\n check[dx][dy][wall] = check[x][y][wall]+1\n return -1\nprint(bfs(0,0,0))\n","repo_name":"jjmin9797/algorithm","sub_path":"20220829/2206.py","file_name":"2206.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"13306111118","text":"import torch \nfrom torch.utils.data import Dataset, DataLoader\n\nclass KorFIN_ABSADatasetforBERT(Dataset):\n def __init__(self, src, label, classification={\"POSITIVE\":0, \"NEGATIVE\":1,\"NEUTRAL\":2}):\n self.src = src\n self.label = label\n self.classification = classification\n\n def __len__(self):\n return len(self.src)\n\n def __getitem__(self, idx):\n src = self.src[idx] + '.'\n label = self.classification[self.label[idx].upper()]\n\n return {\n 'src':src,\n 'label':label\n }\n\n\nclass Seq2SeqBatchGenerator:\n def __init__(self, \n tokenizer\n ):\n \n self.tokenizer = tokenizer\n \n def __call__(self, batch):\n src = [item['src'] for item in batch]\n label = [item['label'] for item in batch]\n\n src_tokenized = self.tokenize(src)\n\n return {\n 'src_input_ids': src_tokenized.input_ids, \n 'src_attention_mask': src_tokenized.attention_mask,\n 'src_token_type_ids':src_tokenized.token_type_ids,\n 'label':torch.tensor(label)\n }\n\n def tokenize(self,input_str):\n return self.tokenizer.batch_encode_plus(input_str, \n padding='longest', \n max_length=512,\n truncation=True, \n return_tensors='pt')\n\n\ndef get_dataloader(dataset, batch_generator, batch_size=4, shuffle=True):\n data_loader = DataLoader(dataset, \n batch_size=batch_size, \n shuffle=shuffle, \n collate_fn=batch_generator,\n drop_last=True,\n num_workers=4)\n return data_loader\n","repo_name":"guijinSON/funcs","sub_path":"KF-ABSA/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"30906934449","text":"import json\nimport os\nfrom unittest import mock\nfrom unittest.mock import patch, MagicMock, call\n\nfrom conductr_cli import logging_setup\nfrom conductr_cli.bundle_core_info import BundleCoreInfo\nfrom conductr_cli.conductr_restore import unpack_backup, process_bundle, compatible_bundle, scale_bundle, restore\nfrom conductr_cli.test.cli_test_case import file_contents, CliTestCase, as_error, strip_margin\n\n\nclass TestConductrRestore(CliTestCase):\n conductr_auth = ('username', 'password')\n server_verification_file = MagicMock(name='server_verification_file')\n\n args = {\n 'dcos_mode': False,\n 'command': 'conduct',\n 'scheme': 'http',\n 'host': '127.0.0.1',\n 'port': 9005,\n 'base_path': '/',\n 'api_version': '1',\n 'disable_instructions': False,\n 'verbose': False,\n 'no_wait': False,\n 'quiet': False,\n 'cli_parameters': '',\n 'backup': 'bkp.zip',\n 'output_path': '/my/fav/path',\n 'conductr_auth': conductr_auth,\n 'server_verification_file': server_verification_file\n }\n\n @patch('tempfile.mkdtemp')\n @patch('shutil.unpack_archive')\n def test_unpack_backup(self, archive_mock, tmp_mock):\n backup = MagicMock()\n tmp_mock.return_value = \"something\"\n result = unpack_backup(backup)\n\n archive_mock.assert_called_once_with(backup, tmp_mock.return_value, format='zip')\n self.assertEqual(tmp_mock.return_value, result)\n\n @patch('conductr_cli.control_protocol.load_bundle')\n @patch('conductr_cli.conduct_load.create_multipart')\n @patch('conductr_cli.bundle_utils.conf')\n def test_process_bundle(self, mock_conf, mock_multipart, mock_load_bundle):\n mock_args = MagicMock()\n mock_conf.return_value = 'hello'\n mock_multipart.return_value = MagicMock()\n bundle_info = BundleCoreInfo('1', 'b_name', '1234', '5678')\n open_mock = mock.mock_open()\n\n with patch('builtins.open', open_mock):\n process_bundle(mock_args, 'yolo', bundle_info)\n\n calls = [call(os.path.join('yolo', 'b_name-1234.zip'), 'rb'),\n call(os.path.join('yolo', 'b_name-5678.zip'), 'rb')]\n open_mock.assert_has_calls(calls)\n\n conf_calls = [call(os.path.join('yolo', 'b_name-1234.zip')),\n call(os.path.join('yolo', 'b_name-5678.zip'))]\n mock_conf.assert_has_calls(conf_calls)\n mock_load_bundle.assert_called_once_with(mock_args, mock_multipart.return_value)\n\n @patch('conductr_cli.control_protocol.load_bundle')\n @patch('conductr_cli.conduct_load.create_multipart')\n @patch('conductr_cli.bundle_utils.conf')\n def test_process_bundle_with_no_configuration(self, mock_conf, mock_multipart, mock_load_bundle):\n mock_args = MagicMock()\n mock_conf.return_value = 'hello'\n mock_multipart.return_value = MagicMock()\n bundle_info = BundleCoreInfo('1', 'b_name', '1234', '')\n open_mock = mock.mock_open()\n\n with patch('builtins.open', open_mock):\n process_bundle(mock_args, 'yolo', bundle_info)\n\n open_mock.assert_called_once_with(os.path.join('yolo', 'b_name-1234.zip'), 'rb')\n mock_conf.assert_called_once_with(os.path.join('yolo', 'b_name-1234.zip'))\n\n mock_load_bundle.assert_called_once_with(mock_args, mock_multipart.return_value)\n\n def test_should_find_compatible_bundles(self):\n b1 = BundleCoreInfo('1', 'b1', 'abcd', 'efgh', 2, compatibility_version=1)\n b2 = BundleCoreInfo('2', 'b2', 'qwer', 'ty', 2, compatibility_version=1)\n b3 = BundleCoreInfo('3', 'b3', 'ghjk', 'polk', 3, compatibility_version=3)\n b4 = BundleCoreInfo('4', 'b3', 'yolo', 'why', 3, compatibility_version=4)\n\n matched = compatible_bundle([b1, b2, b3, b4], 'b3', 3)\n self.assertEqual('3', matched)\n\n no_match = 
compatible_bundle([b1, b2, b3, b4], 'b3', 2)\n self.assertEqual(None, no_match)\n\n not_matched = compatible_bundle([b1, b2, b3, b4], 'b5', 3)\n self.assertEqual(None, not_matched)\n\n @patch('copy.deepcopy')\n @patch('conductr_cli.bundle_scale.wait_for_scale')\n @patch('conductr_cli.control_protocol.run_bundle')\n def test_should_call_run_with_modified_args(self, mock_run_bundle,\n scale_mock, mock_copy):\n mock_args = MagicMock(**self.args)\n mock_run_bundle.return_value = json.loads('{\"bundleId\":\"abcd\"}')\n mock_copy.return_value = MagicMock()\n scale_bundle(mock_args, 'abcd', 3, 'efgh')\n\n mock_run_bundle.assert_called_once_with(mock_copy.return_value)\n scale_mock.assert_called_once_with('abcd', 3, wait_for_is_active=True, args=mock_copy.return_value)\n\n @patch('conductr_cli.control_protocol.get_bundles')\n @patch('conductr_cli.conductr_restore.unpack_backup')\n def test_restore_should_log_load_errors(self, mock_unpack, mock_bundle):\n mock_args = MagicMock(**self.args)\n stdout = MagicMock()\n stderr = MagicMock()\n\n mock_unpack.return_value = 'my/path'\n mock_bundle.return_value = json.loads(file_contents('data/bundles/bundle_json_modified.json'))\n\n logging_setup.configure_logging(mock_args, stdout, stderr)\n\n open_mock = mock.mock_open(read_data=file_contents('data/bundles/bundle_json.json'))\n\n with patch('builtins.open', open_mock):\n restore(mock_args)\n\n mock_unpack.assert_called_once_with('bkp.zip')\n mock_bundle.assert_called_once_with(mock_args)\n\n self.assertEqual(\n strip_margin(\"\"\"|Restoring bundle : continuous-delivery.\n |Restoring bundle : eslite.\n |Restoring bundle : visualizer.\n |Restoring bundle : reactive-maps-backend-region.\n |Restoring bundle : reactive-maps-backend-summary.\n |Restoring bundle : reactive-maps-frontend.\n |\"\"\"),\n self.output(stdout))\n\n self.assertEqual(\n as_error(strip_margin(\"\"\"|Error: continuous-delivery could not be loaded.\n |Error: eslite could not be loaded.\n |Error: visualizer could not be loaded.\n |Error: reactive-maps-backend-region could not be loaded.\n |Error: reactive-maps-backend-summary could not be loaded.\n |Error: reactive-maps-frontend could not be loaded.\n |\"\"\")),\n self.output(stderr))\n\n @patch('conductr_cli.conductr_restore.scale_bundle')\n @patch('conductr_cli.conductr_restore.compatible_bundle')\n @patch('conductr_cli.conductr_restore.process_bundle')\n @patch('conductr_cli.control_protocol.get_bundles')\n @patch('conductr_cli.conductr_restore.unpack_backup')\n def test_restore_should_scale_loaded_bundles(self, mock_unpack,\n mock_bundle, mock_process_bundle,\n mock_compatible, mock_scale_bundle):\n mock_args = MagicMock(**self.args)\n\n mock_unpack.return_value = 'my/path'\n mock_process_bundle.return_value = '1234'\n mock_compatible.return_value = 'yolo'\n\n bundles = file_contents('data/bundles/bundle_json.json')\n mock_bundle.return_value = json.loads(file_contents('data/bundles/bundle_json_modified.json'))\n bundles_info = sorted(BundleCoreInfo.from_bundles(json.loads(bundles)),\n key=lambda b: b.start_time)\n\n open_mock = mock.mock_open(read_data=bundles)\n\n with patch('builtins.open', open_mock):\n restore(mock_args)\n\n mock_unpack.assert_called_once_with('bkp.zip')\n mock_bundle.assert_called_once_with(mock_args)\n\n calls = [call(mock_args, 'my/path', bundles_info[0]),\n call(mock_args, 'my/path', bundles_info[1]),\n call(mock_args, 'my/path', bundles_info[2]),\n call(mock_args, 'my/path', bundles_info[3]),\n call(mock_args, 'my/path', bundles_info[4]),\n call(mock_args, 
'my/path', bundles_info[5])]\n\n mock_process_bundle.assert_has_calls(calls)\n\n dest_bundles_info = BundleCoreInfo.from_bundles(mock_bundle.return_value)\n compat_calls = [call(dest_bundles_info, 'continuous-delivery', '3'),\n call(dest_bundles_info, 'eslite', '1'),\n call(dest_bundles_info, 'visualizer', '2'),\n call(dest_bundles_info, 'reactive-maps-backend-region', '1'),\n call(dest_bundles_info, 'reactive-maps-backend-summary', '1'),\n call(dest_bundles_info, 'reactive-maps-frontend', '1')]\n\n mock_compatible.assert_has_calls(compat_calls)\n\n scale_calls = [call(mock_args, '1234', 1, 'yolo'),\n call(mock_args, '1234', 1, 'yolo'),\n call(mock_args, '1234', 1, 'yolo'),\n call(mock_args, '1234', 1, 'yolo'),\n call(mock_args, '1234', 0, 'yolo'),\n call(mock_args, '1234', 1, 'yolo')]\n\n mock_scale_bundle.assert_has_calls(scale_calls)\n","repo_name":"typesafehub/conductr-cli","sub_path":"conductr_cli/test/test_conductr_restore.py","file_name":"test_conductr_restore.py","file_ext":"py","file_size_in_byte":9370,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"72"}
+{"seq_id":"23379015082","text":"import torch\nimport torch.optim as optim\nimport torch.nn as nn\nfrom torch_sparse import spspmm\nimport pandas as pd\nimport numpy as np\n# Creating dataset\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\nfrom blobs import adj_m\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nif device == \"cuda:0\":\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n\n\nclass LSM(nn.Module):\n def __init__(self, input_size, latent_dim, sparse_i_idx, sparse_j_idx, count, sample_i_size, sample_j_size):\n super(LSM, self).__init__()\n self.input_size = input_size\n self.latent_dim = latent_dim\n\n self.beta = torch.nn.Parameter(torch.randn(self.input_size[0], device=device))\n self.gamma = torch.nn.Parameter(torch.randn(self.input_size[1], device=device))\n\n self.latent_zi = torch.nn.Parameter(torch.randn(self.input_size[0], self.latent_dim, device=device))\n self.latent_zj = torch.nn.Parameter(torch.randn(self.input_size[1], self.latent_dim, device=device))\n # Change sample weights for each partition\n self.sampling_i_weights = torch.ones(input_size[0]).to(device)\n #self.sampling_i_weights[test_idx_i] = 0 #dont sample test set :D\n self.sampling_j_weights = torch.ones(input_size[1]).to(device)\n #self.sampling_j_weights[test_idx_j] = 0 #same\n # Change sample sizes for each partition\n self.sample_i_size = sample_i_size\n self.sample_j_size = sample_j_size\n\n self.sparse_i_idx = sparse_i_idx\n self.sparse_j_idx = sparse_j_idx\n\n self.count = count\n\n self.z_dist = 0\n self.Lambda = 0\n\n def sample_network(self):\n # USE torch_sparse lib i.e. : from torch_sparse import spspmm\n\n # sample for bipartite network\n sample_i_idx = torch.multinomial(self.sampling_i_weights, self.sample_i_size, replacement=False).to(device)\n sample_j_idx = torch.multinomial(self.sampling_j_weights, self.sample_j_size, replacement=False).to(device)\n # translate sampled indices w.r.t. 
to the full matrix, it is just a diagonal matrix\n indices_i_translator = torch.cat([sample_i_idx.unsqueeze(0), sample_i_idx.unsqueeze(0)], 0).to(device)\n indices_j_translator = torch.cat([sample_j_idx.unsqueeze(0), sample_j_idx.unsqueeze(0)], 0).to(device)\n # adjacency matrix in edges format\n edges = torch.cat([self.sparse_i_idx.unsqueeze(0), self.sparse_j_idx.unsqueeze(0)], 0)\n # matrix multiplication B = Adjacency x Indices translator\n # see spspmm function, it give a multiplication between two matrices\n # indexC is the indices where we have non-zero values and valueC the actual values (in this case ones)\n indexC, valueC = spspmm(edges, self.count.float(), indices_j_translator,\n torch.ones(indices_j_translator.shape[1],device=device), self.input_size[0], self.input_size[1],\n self.input_size[1], coalesced=True)\n # second matrix multiplication C = Indices translator x B, indexC returns where we have edges inside the sample\n indexC, valueC = spspmm(indices_i_translator, torch.ones(indices_i_translator.shape[1],device=device), indexC, valueC,\n self.input_size[0], self.input_size[0], self.input_size[1], coalesced=True)\n\n # edge row position\n sparse_i_sample = indexC[0, :]\n # edge column position\n sparse_j_sample = indexC[1, :]\n\n return sample_i_idx, sample_j_idx, sparse_i_sample, sparse_j_sample, valueC\n\n def log_likelihood(self):\n sample_i_idx, sample_j_idx, sparse_i_sample, sparse_j_sample, valueC = self.sample_network()\n self.z_dist = (((torch.unsqueeze(self.latent_zi[sample_i_idx], 1) - self.latent_zj[\n sample_j_idx] + 1e-06) ** 2).sum(-1)) ** 0.5\n bias_matrix = torch.unsqueeze(self.beta[sample_i_idx], 1) + self.gamma[sample_j_idx]\n self.Lambda = bias_matrix - self.z_dist\n z_dist_links = (((self.latent_zi[sparse_i_sample] - self.latent_zj[sparse_j_sample] + 1e-06) ** 2).sum(\n -1)) ** 0.5\n bias_links = self.beta[sparse_i_sample] + self.gamma[sparse_j_sample]\n log_Lambda_links = valueC * (bias_links - z_dist_links)\n LL = (log_Lambda_links-torch.lgamma(valueC+1)).sum() - torch.sum(torch.exp(self.Lambda))\n\n return LL\n\n def link_prediction(self, test_idx_i, test_idx_j, test_value):\n with torch.no_grad():\n # Distance measure (euclidian)\n z_pdist_test = (((self.latent_zi[test_idx_i] - self.latent_zj[test_idx_j] + 1e-06) ** 2).sum(-1)) ** 0.5\n\n # Add bias matrices\n logit_u_test = -z_pdist_test + self.beta[test_idx_i] + self.gamma[test_idx_j]\n\n # Get the rate\n rate = torch.exp(logit_u_test)\n\n # Create target (make sure its in the right order by indexing)\n target = test_value\n\n fpr, tpr, threshold = metrics.roc_curve(target.cpu().data.numpy(), rate.cpu().data.numpy())\n\n # Determining AUC score and precision and recall\n auc_score = metrics.roc_auc_score(target.cpu().data.numpy(), rate.cpu().data.numpy())\n return auc_score, fpr, tpr\n\n # Implementing test log likelihood without mini batching\n def test_log_likelihood(self, test_idx_i, test_idx_j, test_value):\n with torch.no_grad():\n z_dist = (((self.latent_zi[test_idx_i] - self.latent_zj[test_idx_j] + 1e-06) ** 2).sum(-1)) ** 0.5\n\n bias_matrix = self.beta[test_idx_i] + self.gamma[test_idx_j]\n Lambda = (bias_matrix - z_dist)\n LL_test = ((test_value * Lambda) - (torch.lgamma(test_value+1))).sum() - torch.sum(torch.exp(Lambda))\n return LL_test\n\n\nif __name__ == \"__main__\":\n A = adj_m\n\n\n '''idx = torch.where(A > 0)\n\n value = A[idx]\n\n idx_i = idx[0]\n\n idx_j = idx[1]'''\n\n #Lists to store obtained losses\n train_loss = []\n test_loss = []\n\n A_shape = (2000, 1000)\n num_samples = 
200000\n    idx_i_test = torch.multinomial(input=torch.arange(0, float(A_shape[0])), num_samples=num_samples,\n                                   replacement=True)\n    idx_j_test = torch.multinomial(input=torch.arange(0, float(A_shape[1])), num_samples=num_samples,\n                                   replacement=True)\n\n    A = torch.tensor(A)\n\n    value_test = A[idx_i_test, idx_j_test].numpy()\n\n    A[idx_i_test, idx_j_test] = 0\n\n    # Train data\n    train_data_idx = torch.where(A > 0)\n    values_train = A[train_data_idx[0], train_data_idx[1]].numpy()\n\n    train_idx_i = train_data_idx[0]\n    train_idx_j = train_data_idx[1]\n    train_value = torch.tensor(values_train)\n\n    test_idx_i = idx_i_test\n    test_idx_j = idx_j_test\n    test_value = torch.tensor(value_test)\n\n    test_value[test_value > 0] = 1\n\n    learning_rate = 0.01 # Learning rate for adam\n\n    # For binary link prediction\n    # Lists to obtain values for AUC, FPR, TPR and loss\n    AUC_scores = []\n    tprs = []\n    base_fpr = np.linspace(0, 1, 101)\n    plt.figure(figsize=(5,5))\n\n\n    # Define the model with training data.\n    # Cross-val loop validating 5 seeds;\n    for i in range(5):\n        np.random.seed(i)\n        torch.manual_seed(i)\n\n        model = LSM(input_size=(2000, 1000), latent_dim=2, sparse_i_idx=train_idx_i, sparse_j_idx=train_idx_j, count=train_value,\n                    sample_i_size=2000, sample_j_size=1000).to(device)\n\n        # Define the optimizer.\n        optimizer = optim.Adam(params=list(model.parameters()), lr=learning_rate)\n        cum_loss_train = []\n        cum_loss_test = []\n\n        # Run iterations.\n        iterations = 10\n\n        for _ in range(iterations):\n            loss = -model.log_likelihood()\n            loss_test = -model.test_log_likelihood(test_idx_i, test_idx_j, test_value)\n            optimizer.zero_grad()\n            loss.backward()\n            optimizer.step()\n            cum_loss_test.append(loss_test.item() / num_samples)\n            cum_loss_train.append(loss.item() / ((model.sample_i_size*model.sample_j_size)))\n\n\n        train_loss.append(cum_loss_train)\n        test_loss.append(cum_loss_test)\n\n        # Binary link-prediction:\n        auc_score, fpr, tpr = model.link_prediction(test_idx_i, test_idx_j, test_value)\n\n        AUC_scores.append(auc_score)\n        plt.plot(fpr, tpr, 'b', alpha=0.15)\n        tpr = np.interp(base_fpr, fpr, tpr)\n        tpr[0] = 0.0\n        tprs.append(tpr)\n\n    print(np.mean(AUC_scores) + np.array([-1,1]) * 1.96 * np.sqrt(np.var(AUC_scores)/len(AUC_scores)))\n    print(np.mean(AUC_scores))\n    print( 1.96 * np.sqrt(np.var(AUC_scores)/len(AUC_scores)))\n\n    tprs = np.array(tprs)\n    mean_tprs = tprs.mean(axis=0)\n    std = tprs.std(axis=0)\n\n    # Using standard deviation as error bars\n    tprs_upper = np.minimum(mean_tprs + std, 1)\n    tprs_lower = mean_tprs - std\n\n    plt.plot(base_fpr, mean_tprs, 'b', label='Mean ROC-curve')\n    plt.fill_between(base_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.3)\n\n    plt.plot([0, 1], [0, 1], 'r--', label='Random classifier')\n    plt.xlim([0, 1])\n    plt.ylim([0, 1])\n    plt.ylabel('True Positive Rate')\n    plt.xlabel('False Positive Rate')\n    # use the current axes; plt.axes() would create a new, empty axes on top of the plot\n    plt.gca().set_aspect('equal', 'datalim')\n    plt.grid()\n    plt.legend()\n    # save before show, otherwise the figure is cleared and an empty file is written\n    plt.savefig('Average_ROC_curve.png')\n    plt.show()\n    plt.clf()\n\n\n","repo_name":"DanielHolmelund/Learning-and-Visualizing-Bipartite-Network-Embeddings","sub_path":"Binary_link_pred_blobs.py","file_name":"Binary_link_pred_blobs.py","file_ext":"py","file_size_in_byte":9380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"27753689550","text":"import typing\n\nfrom midcom_tax.midcom import Product\n\nfrom PySide6 import QtCore\nfrom PySide6.QtCore import Qt\n\n\nclass TaxModel(QtCore.QAbstractTableModel):\n def __init__(self, data):\n super(TaxModel, self).__init__()\n self._data = data\n self.headers = [\n 'ID',\n 'Tax Type \"$\" / \"%\"',\n 'Rate \"XX.XXXX\"',\n 'Tax Subtotal? \"Y\" / \"N\"',\n 'Label (15 Character Maximum)'\n ]\n\n def data(self, index: QtCore.QModelIndex, role: int = ...):\n if role == Qt.DisplayRole:\n tax = self._data[index.row()]\n col = index.column()\n if col == 0:\n return str(tax.id)\n elif col == 1:\n return str(tax.tax_type)\n elif col == 2:\n return str(tax.tax_rate)\n elif col == 3:\n return str(tax.tax_subtotal)\n elif col == 4:\n return str(tax.label)\n\n def setData(self, index: QtCore.QModelIndex, value: typing.Any, role: int = ...):\n if role == Qt.EditRole:\n col = index.column()\n row = index.row()\n\n # ID\n if col == 0:\n return False\n\n # Tax Type\n elif col == 1:\n if value not in ['$', '%']:\n return False\n self._data[row].tax_type = value\n\n # Tax Rate\n elif col == 2:\n if len(value) != 6:\n return False\n self._data[row].tax_rate = value\n\n # Tax Subtotal\n elif col == 3:\n if value not in ['Y', 'N']:\n return False\n self._data[row].tax_subtotal = value\n\n # Label\n elif col == 4:\n trimmed = value[:15]\n label_len = len(trimmed)\n if label_len < 15:\n trimmed += ' ' * (15-label_len)\n print(len(trimmed))\n self._data[row].label = trimmed\n\n return True\n\n def rowCount(self, parent: QtCore.QModelIndex = ...):\n return len(self._data)\n\n def columnCount(self, parent: QtCore.QModelIndex = ...):\n return len(self.headers)\n\n def flags(self, index: QtCore.QModelIndex) -> QtCore.Qt.ItemFlags:\n if index.row() == 0:\n # First tax entry cannot be used.\n return Qt.ItemIsSelectable\n else:\n return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable\n\n def headerData(self, section: int, orientation: QtCore.Qt.Orientation, role: int = ...) -> typing.Any:\n if orientation == Qt.Horizontal and role == Qt.DisplayRole:\n return self.headers[section]\n\n\nclass ProductModel(QtCore.QAbstractTableModel):\n def __init__(self, data):\n super(ProductModel, self).__init__()\n self._data = data\n self.headers = [\n 'ID',\n 'Product tax combination'\n ]\n\n def data(self, index: QtCore.QModelIndex, role: int = ...) -> typing.Any:\n if role == Qt.DisplayRole:\n product = self._data[index.row()]\n col = index.column()\n if col == 0:\n return str(product.id)\n elif col == 1:\n xs = []\n for x in product.taxes:\n if x == '00' and len(xs) == 0:\n xs = ['00']\n break\n elif x == '00':\n break\n xs.append(x)\n return ''.join(xs)\n\n def setData(self, index: QtCore.QModelIndex, value: typing.Any, role: int) -> bool:\n if role == Qt.EditRole:\n col = index.column()\n row = index.row()\n\n # ID\n if col == 0:\n return False\n\n # Product Code\n elif col == 1:\n self._data[row].load_user_input_str(value)\n\n return True\n\n def rowCount(self, parent: QtCore.QModelIndex = ...) -> int:\n return len(self._data)\n\n def columnCount(self, parent: QtCore.QModelIndex = ...) -> int:\n return len(self.headers)\n\n def flags(self, index: QtCore.QModelIndex) -> QtCore.Qt.ItemFlags:\n if index.row() == 0:\n return Qt.ItemIsSelectable\n elif index.column() == 0:\n return Qt.ItemIsSelectable | Qt.ItemIsEnabled\n else:\n return Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable\n\n def headerData(self, section: int, orientation: QtCore.Qt.Orientation, role: int = ...) 
-> typing.Any:\n if orientation == Qt.Horizontal and role == Qt.DisplayRole:\n return self.headers[section]\n","repo_name":"ZachMassia/midcom-tax-python","sub_path":"midcom_tax/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4724,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"23637416490","text":"import random\n\npick = ['R', 'P', 'S']\nplay = False\ncomputer = random.choice(pick)\nplayer_counter = 0\ncomputer_counter = 0\n\nwhile play == False:\n player = input(\"Enter your move \\n\").lower()\n \n if player == 'P' and computer == 'R' or player == 'S' and computer == 'P' or player == 'R' and computer == 'S': \n play = True\n print(\"You win\")\n player_counter =+ 1\n\n elif player == 'R' and computer == 'P' or player == 'P' and computer == 'S' or player == 'S' and computer == 'R':\n play = True\n print(\"You loose\")\n computer_counter =+ 1\nelse:\n print(\"It is a tie\")","repo_name":"OfficialOkanlawon/zuri-js-calculator","sub_path":"auth/iii.py","file_name":"iii.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"18363615091","text":"import csv\nimport os\nimport glob\nimport pytz\nimport datetime\nimport logging as log\nfrom scrapy import signals\nfrom scrapy.exporters import CsvItemExporter\nfrom rotating_proxies.policy import BanDetectionPolicy\n\n\nOUTPUT_PATH = \"/home/FM/results/Category_Indexing\"\n# OUTPUT_PATH = '/Users/PathakUmesh/Programming_stuffs/NIGAM/CATEGORY'\n\nclass ExtractPipeline(object):\n def __init__(self):\n self.files = {}\n self.extracted_categories = list()\n if not os.path.exists(OUTPUT_PATH):\n os.makedirs(OUTPUT_PATH)\n utc_time = datetime.datetime.utcnow()\n tz_info = pytz.timezone('Asia/Kolkata')\n utc = pytz.utc\n time_local = utc.localize(utc_time).astimezone(tz_info)\n self.start_formatted_time = time_local.strftime('%d%b%Y_%Hhr%Mmin')\n self.file_name = '{}/Amazon_Categories_{}.csv'.format(OUTPUT_PATH, self.start_formatted_time)\n self.get_category_map()\n self.export_fields = ['category', 'url']\n\n\n def get_category_map(self):\n self.category_map = {}\n category_path = '{}/*'.format(OUTPUT_PATH)\n list_of_files = glob.glob(category_path)\n if not list_of_files:\n return\n file_path = max(list_of_files, key=os.path.getctime)\n log.info('[+++++] Current latest category file is: {}'.format(file_path))\n with open(file_path, 'r') as csvfile:\n csvreader = csv.reader(csvfile)\n for i, buy_row in enumerate(csvreader):\n if i == 0:\n continue\n if not self.category_map.get(str(buy_row[0])):\n self.category_map.update({str(buy_row[0]):str(buy_row[1])})\n\n\n @classmethod\n def from_crawler(cls, crawler):\n pipeline = cls()\n crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)\n crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)\n return pipeline\n\n def spider_opened(self, spider):\n self.start_time = datetime.datetime.utcnow()\n tz_info = pytz.timezone('Asia/Kolkata')\n utc = pytz.utc\n time_local = utc.localize(self.start_time).astimezone(tz_info)\n log.info('[+++++] Starting Time (IST;Asia-Mumbai): {}'.format(time_local.strftime('%b %d, %Y @%Hhr %Mmin %Ssec')))\n output_file = open(self.file_name, 'w+b')\n self.files[spider] = output_file\n self.exporter = CsvItemExporter(output_file,fields_to_export = self.export_fields)\n self.exporter.start_exporting()\n\n def spider_closed(self, spider):\n log.info('[+++] Finished fetching; now adding missing urls from previous file')\n for category, url in self.category_map.items():\n item = {'category': category, 'url': url}\n self.exporter.export_item(item)\n log.info('[+++] {}'.format(category))\n log.info('[+++] Added {} urls from previous file'.format(len(self.category_map)))\n self.exporter.finish_exporting()\n output_file = self.files.pop(spider)\n output_file.close()\n \n utc_time = datetime.datetime.utcnow()\n tz_info = pytz.timezone('Asia/Kolkata')\n utc = pytz.utc\n started_time = utc.localize(self.start_time).astimezone(tz_info)\n finished_time = utc.localize(utc_time).astimezone(tz_info)\n\n self.finish_formatted_time = finished_time.strftime('%d%b%Y_%Hhr%Mmin')\n\n new_name = self.file_name.replace(self.start_formatted_time, self.finish_formatted_time)\n os.rename(self.file_name, new_name)\n\n log.info('[+++++] Output file path: /home/FM/results/Trade_In/Amazon_Buy')\n log.info('[+++++] Output file name: {}'.format(new_name.rsplit('/',1)[-1]))\n\n\n time_taken = self.strfdelta(utc_time-self.start_time, \"{hours} hours {minutes} minutes {seconds} seconds\")\n log.info('[+++++] Starting Time (IST;Asia-Mumbai): {}'.format(started_time.strftime('%b %d, %Y @%Hhr %Mmin 
%Ssec')))\n log.info('[+++++] Finished Time (IST;Asia-Mumbai): {}'.format(finished_time.strftime('%b %d, %Y @%Hhr %Mmin %Ssec')))\n log.info('[+++++] Total Time Taken: {}'.format(time_taken))\n\n def process_item(self, item, spider):\n category = item['category']\n if category not in self.extracted_categories:\n self.extracted_categories.append(category)\n self.exporter.export_item(item)\n\n if self.category_map.get(category):\n self.category_map.pop(category) \n return item\n\n def strfdelta(self, tdelta, fmt):\n d = dict()\n d[\"hours\"], rem = divmod(tdelta.seconds, 3600)\n d[\"minutes\"], d[\"seconds\"] = divmod(rem, 60)\n return fmt.format(**d)\n\nclass BanPolicy(BanDetectionPolicy):\n def response_is_ban(self, request, response):\n # use default rules, but also consider HTTP 200 responses\n # a ban if there is 'captcha' word in response body.\n # ban = super(BanPolicy, self).response_is_ban(request, response)\n # ban = ban or response.status == 429\n # return ban\n\n return response.status == 429\n\n def exception_is_ban(self, request, exception):\n # override method completely: don't take exceptions in account\n return None","repo_name":"pathakumesh/Buy-Sell-Books","sub_path":"pipelines_amazon_category.py","file_name":"pipelines_amazon_category.py","file_ext":"py","file_size_in_byte":5223,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"15253393959","text":"def task():\n ac, aj = map(int, input().split())\n times = []\n used = [0, 0]\n for i in range(ac):\n c, d = map(int, input().split())\n times.append((c, 1, 0))\n times.append((d, 0, 0))\n used[0] += d - c\n for i in range(aj):\n c, d = map(int, input().split())\n times.append((c, 1, 1))\n times.append((d, 0, 1))\n used[1] += d - c\n times = sorted(times)\n last_time, last_is_start, last_who = times[-1]\n last_time -= 24 * 60\n wiggle_room = 0\n needed_edges = 0\n elim = []\n for (time, is_start, who) in times:\n if is_start:\n if who != last_who:\n wiggle_room += time - last_time\n needed_edges += 1\n else:\n elim.append((time - last_time, who))\n last_time, last_is_start, last_who = time, is_start, who\n #print(times)\n #print(used, needed_edges, wiggle_room, elim)\n elim = sorted(elim)\n for length, who in elim:\n if length + used[who] <= 720:\n used[who] += length\n else:\n needed_edges += 2\n return needed_edges\n\nt = int(input())\nfor i in range(t):\n print(\"Case #{}: {}\".format(i+1, task()))\n","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/CodeJamData/17/32/18.py","file_name":"18.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"}
+{"seq_id":"4156197166","text":"from turtle import Turtle, Screen\r\nimport random\r\n\r\ntim = Turtle()\r\ntim.shape(\"turtle\")\r\n\r\ntim.speed(0)\r\ntim.pensize(5)\r\ncolors = ['red', 'blue', 'yellow', 'green', 'cyan', 'purple']\r\n\r\n\r\nproceed = True\r\nwhile proceed:\r\n move_choice = random.randint(20, 40)\r\n right_left = random.randint(1, 2)\r\n if right_left == 1:\r\n tim.color(random.choice(colors))\r\n tim.forward(move_choice)\r\n tim.right(90)\r\n elif right_left == 2:\r\n tim.color(random.choice(colors))\r\n tim.forward(move_choice)\r\n tim.left(90)\r\n\r\nscreen = Screen()\r\nscreen.exitonclick()\r\n","repo_name":"kaweckib/turtle-graphics-art","sub_path":"random_walk.py","file_name":"random_walk.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"38609948512","text":"from django.contrib.admin.views.decorators import staff_member_required\nfrom django.http import HttpResponse\nfrom django.views.generic import TemplateView\nfrom django.shortcuts import render, redirect\nfrom django import forms\n\nfrom Prepascience.form import *\nfrom comptes.models import *\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.conf import settings\n\n\ndef homepage(request):\n nbp = Projet.objects.all().count()\n nbm = Materiaux.objects.all().count()\n nbu = User.objects.all().count()\n return render(request, \"home.html\", {'nbm': nbm, 'nbp': nbp, 'nbu': nbu})\n\ndef aide(request):\n return render(request, \"aide.html\")\n\ndef apropos(request):\n return render(request, \"apropos.html\")\n\ndef profil(request):\n if request.user.is_authenticated :\n per = User.objects.filter(username__exact=request.user.username).get\n nbp = PersonneProjet.objects.filter(personne__exact=request.user).count()\n nbcp = Projet.objects.filter(chefProjet__exact=request.user).count()\n nbp = nbp + nbcp\n\n return render(request, \"profil.html\", {'per': per, 'nbp': nbp, 'nbcp': nbcp})\n else:\n return render(request, \"profil.html\")\n\n\n\"\"\"class LoginView(TemplateView):\n\n template_name = 'login.html'\n\n def post (self, request, **kwargs):\n username = request.POST.get('username', False)\n password = request.POST.get('password', False)\n user =authenticate(username=username, password=password)\n if user is not None and user.is_active:\n login(request, user)\n return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL )\n return render(request, self.template_name)\"\"\"\n\n\nclass demande(TemplateView):\n template_name = 'demande.html'\n\n def get(self, request):\n form = Demandeform()\n return render(request, self.template_name, {'form': form})\n\n def post(self, request):\n form = Demandeform(request.POST)\n if form.is_valid():\n form.save()\n\n form = Demandeform\n\n return render(request, self.template_name, {'form': form})\n\n\nclass ajout(TemplateView):\n template_name = 'ajout.html'\n\n def get(self, request):\n form = Ajoutform()\n return render(request, self.template_name, {'form': form})\n\n def post(self, request):\n form = Ajoutform(request.POST)\n if form.is_valid():\n form.save()\n\n form = Ajoutform\n\n return render(request, self.template_name, {'form': form})\n\n\ndef logout(request):\n logout(request)\n\n\ndef demandead(request):\n dem = Demande.objects.all()\n l = len(dem)\n return render(request, \"demandead.html\", {'dem': dem, 'l': l})\n\n\nclass ajoutProjet(TemplateView):\n template_name = 'creerProjet.html'\n\n def get(self, request):\n form = Projetform()\n return render(request, self.template_name, {'form': form})\n\n def post(self, request):\n form = Projetform(request.POST)\n type = request.POST.get('type')\n form.fields['type'].choices = [(type, type)]\n if form.is_valid():\n projet = form.save(commit=False)\n projet.chefProjet = request.user\n projet.save()\n\n form = Projetform()\n\n return render(request, self.template_name, {'form': form})\n\n\nclass ajoutProfil(TemplateView):\n template_name = \"creation.html\"\n\n def get(self, request):\n form = CreaProform()\n return render(request, self.template_name, {'form': form})\n\n def post(self, request):\n form = CreaProform(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n user.set_password(user.password)\n user.save()\n\n form = CreaProform()\n return render(request, self.template_name, {'form': form})\n\n\nclass 
projets(TemplateView):\n\n def get(self, request):\n form = AjoutCollab()\n if request.user.is_authenticated:\n pchef = Projet.objects.filter(chefProjet__exact=request.user)\n pcollab = PersonneProjet.objects.filter(personne__exact=request.user)\n nomcollab = PersonneProjet.objects.all()\n listeMat = ProjetMateriel.objects.all()\n return render(request, \"projet.html\",\n {'pchef': pchef, 'pcollab': pcollab, 'form': form, 'nomcollab': nomcollab,\n 'listeMat': listeMat})\n else:\n return render(request, \"projet.html\")\n\n def post(self, request):\n form = AjoutCollab(request.POST)\n nom = request.POST.get('nom')\n id = int(request.POST.get('i'))\n if form.is_valid():\n collaborateur = User.objects.filter(username__exact=nom)\n projet = Projet.objects.filter(id__exact=id).get()\n testDejaExistant = PersonneProjet.objects.filter(personne=collaborateur.get(), projet=projet)\n if testDejaExistant:\n return redirect('/projets')\n if not collaborateur:\n return redirect('/projets')\n else:\n b = form.save(commit=False)\n b.personne = collaborateur.get()\n b.projet = projet\n b.save()\n form = AjoutCollab()\n return redirect('/projets')\n\n\nclass materiaux(TemplateView):\n def get(self, request):\n addmat = AjoutMateriel()\n form = request.GET['form']\n if form == '':\n mat = Materiaux.objects.all()\n else:\n mat = Materiaux.objects.filter(nom__icontains=form)\n l = len(mat)\n if request.user.is_authenticated:\n projets = Projet.objects.filter(chefProjet=request.user)\n return render(request, \"materiaux.html\", {'mat': mat, 'l': l, 'addmat': addmat, 'projets': projets})\n else:\n return render(request, \"materiaux.html\", {'mat': mat, 'l': l, 'addmat': addmat})\n\n def post(self, request):\n addmat = AjoutMateriel(request.POST)\n idmat = request.POST.get('idmat')\n idproj = request.POST.get('idproj')\n if addmat.is_valid():\n projet = Projet.objects.filter(id__exact=idproj).get()\n materiel = Materiaux.objects.filter(id__exact=idmat).get()\n testDejaExistant = ProjetMateriel.objects.filter(projet=projet, materiaux=materiel)\n if testDejaExistant:\n return redirect('/materiel/?form=')\n else:\n lien = addmat.save(commit=False)\n lien.projet = projet\n lien.materiaux = materiel\n lien.save()\n addmat = AjoutMateriel()\n return redirect('/materiel/?form=')\n\n","repo_name":"Dylan-Helin/Prepascience","sub_path":"Prepascience/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"26616944060","text":"import numpy as np\nfrom numpy import linspace\nfrom scipy.integrate import solve_ivp\n\ninit_time = 0 # [s]\nfinal_time = 10.0 # [s]\nnum_data = 200\ntout = linspace(init_time, final_time, num_data)\n\nJ_inertia = np.array([[0.005, -0.001, 0.004],\n [-0.001, 0.006, -0.002],\n [0.004, -0.002, 0.004]])\nJ_inv = np.linalg.inv(J_inertia)\nJ_inv_J_inertia = np.vstack((J_inertia,J_inv))\n\nq0 = np.array([0,0,0,1])\nw0 = np.array([0,0,0])\n\nstate_0 = np.hstack((q0,w0))\n\ndef dqdt_attitude_kinematics(q_true, w_true):\n quat=q_true \n\n wx=np.array([[0, -w_true[2], w_true[1]],\n [w_true[2], 0, -w_true[0]],\n [-w_true[1], w_true[0], 0]])\n \n Omega_13 = np.hstack((-wx,np.resize(w_true,(3,1))))\n Omega_4 = np.hstack((-w_true,0))\n Omega = np.vstack((Omega_13, Omega_4))\n \n dqdt = 0.5*(Omega@quat)\n \n return dqdt\n\n\ndef dwdt_attitude_dynamics(w_true,J_inertia,inv_J_inertia, M_torque):\n\n Jw = J_inertia@w_true\n Jw_dot = -np.cross(w_true,Jw) + M_torque\n \n dwdt = inv_J_inertia@Jw_dot\n \n return dwdt\n\n\ndef dqdt_dwdt(time,state,J_inv_J_inertia):\n \n q_current = state[0:4]\n q_current = q_current/np.linalg.norm(q_current)\n w_current = state[4::]\n\n J_inertia = J_inv_J_inertia[0:3,:]\n J_inv = J_inv_J_inertia[3::,:]\n\n M_torque = np.array([0.00001+0.0005*np.sin(2*time), \n -0.00002+0.0001*np.cos(0.75*time), \n -0.0001])\n \n dqdt = dqdt_attitude_kinematics(q_current, w_current)\n dwdt = dwdt_attitude_dynamics(w_current, J_inertia, J_inv, M_torque)\n \n dstate_dt = np.hstack((dqdt,dwdt))\n return dstate_dt\n\nsol = solve_ivp(dqdt_dwdt, (init_time, final_time), state_0, t_eval=tout, \n rtol=1e-6,atol=1e-9, max_step=0.01, args=(J_inv_J_inertia,))\nqout = sol.y[0:4,:]\nwout = sol.y[4::,:]\n\n\nimport matplotlib.pyplot as plt\n\nfig, (ax,ax1) = plt.subplots(nrows=2,ncols=1)\nax.plot(tout,qout[0,:],'b-',tout,qout[1,:],'r--',tout,qout[2,:],'g-.',tout,qout[3,:],'m:')\n\nfig.set_figheight(6) # size in inches\nfig.set_figwidth(8) # size in inches\n\nxtick_list = np.array([0,1,2,3,4,5,6,7,8,9,10])\nax.set_xticks(xtick_list)\nax.set_xticklabels(xtick_list,fontsize=14)\n\nytick_list = np.array([-1.0,-0.5,0.0,0.5,1.0])\nax.set_yticks(ytick_list)\nax.set_yticklabels(ytick_list,fontsize=14)\n\nax.legend(('$q_1$','$q_2$','$q_3$','$q_4$'),fontsize=14, loc='upper right')\nax.axis((0,10,-1.0,1.0))\nax.set_ylabel('quaternion',fontsize=14)\n\nax1.plot(tout,wout[0,:],'r-',tout,wout[1,:],'b--',tout,wout[2,:],'m-.')\nax1.set_xticks(xtick_list)\nax1.set_xticklabels(xtick_list,fontsize=14)\nax1.set_xlabel('time [s]',fontsize=14)\n\nytick_list = np.array([-2.0,-1.0,0.0,1.0,2.0])\nax1.set_yticks(ytick_list)\nax1.set_yticklabels(ytick_list,fontsize=14)\n\nax1.legend(('$\\omega_1$','$\\omega_2$','$\\omega_3$'),fontsize=14, loc='upper right')\nax1.axis((0,10,-2.0,2.0))\nax1.set_xlabel('time [s]',fontsize=14)\nax1.set_ylabel('$\\omega$ [rad/s]',fontsize=14)\n\n#fig.set_size_inches(9,6) \n#fig.savefig('dwdt_dqdt_solve_Mt.pdf',dpi=250)\n","repo_name":"myjr52/dynsys.matlab.python","sub_path":"python/python_2_8_dJwdt_attitude_dynamics.py","file_name":"python_2_8_dJwdt_attitude_dynamics.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"72"}
+{"seq_id":"31243668734","text":"import copy\nimport json\nimport os\nimport time\nimport subprocess\n\nimport gpsuploader.gpsuploader as gpsu\n\n\ndef call_command(commands):\n cmd = []\n for command in commands:\n cmd.append(command)\n subprocess.call(cmd)\n output = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n return output.communicate()[0]\n\n\ndef test_getgps():\n json_data = r\"{\\\"age\\\":0, \\\"latitude\\\":\\\"43.8616\\\", \\\"longitude\\\":\\\"-79.3854\\\", \\\"elevation\\\":\\\"184.0\\\", \\\"course\\\":\\\"\\\", \\\"speed\\\":\\\"N\\\"}\"\n want_json_data = '{\"age\":0, \"latitude\":\"43.8616\", \"longitude\":\"-79.3854\", \"elevation\":\"184.0\", \"course\":\"\", \"speed\":\"N\"}'\n cmd = \"echo {0}\".format(json_data)\n assert gpsu.get_gps(cmd) == json.loads(want_json_data)\n\n\ndef test_add_record():\n key = \"add_key\"\n value = \"add_value\"\n d = {\n \"age\": 0,\n \"latitude\": \"43.8616\",\n \"longitude\": \"-79.3854\",\n \"elevation\": \"184.0\",\n \"course\": \"\",\n \"speed\": \"N\",\n }\n want_d = copy.copy(d)\n want_d[key] = value\n assert gpsu.add_record(d, key, value) == want_d\n\n\ndef test_check_signal():\n tests = [\n {\n \"case\": \"signal_true\",\n \"input\": {\n \"age\": 0,\n \"latitude\": \"43.8616\",\n \"longitude\": \"-79.3854\",\n \"elevation\": \"184.0\",\n \"course\": \"\",\n \"speed\": \"N\",\n },\n \"want\": {\n \"age\": 0,\n \"latitude\": \"43.8616\",\n \"longitude\": \"-79.3854\",\n \"elevation\": \"184.0\",\n \"course\": \"\",\n \"speed\": \"N\",\n \"signal\": \"true\",\n },\n },\n {\n \"case\": \"signal_false\",\n \"input\": {\n \"signal\": \"false\",\n },\n \"want\": {\n \"signal\": \"false\",\n },\n },\n ]\n for t in tests:\n print()\n print(\"case: {0}\".format(t[\"case\"]))\n assert gpsu.check_signal(t[\"input\"]) == t[\"want\"]\n\n\ndef test_put_item():\n print()\n stream_name = \"local_gps_omega2plus\"\n endpoint_url = \"http://localhost:4566\"\n aws_region = \"ap-northeast-1\"\n aws_access_key_id = \"dummy\"\n aws_secret_access_key = \"dummy\"\n setup_cmds = [\n \"aws kinesis --profile local create-stream --stream-name {0} --shard-count 1 --endpoint-url {1}\".format(\n stream_name, endpoint_url\n )\n ]\n for cmd in setup_cmds:\n print(cmd)\n os.system(cmd)\n # wait for localstack kinesis ACTIVE\n wait_cmd = \"aws kinesis --profile local describe-stream --stream-name {0} --endpoint-url {1}\".format(\n stream_name, endpoint_url\n )\n\n for i in range(10):\n print(wait_cmd)\n jsons = call_command(wait_cmd.split(\" \"))\n s = json.loads(jsons)\n if \"ACTIVE\" == s[\"StreamDescription\"][\"StreamStatus\"]:\n break\n time.sleep(1)\n\n gpsu.put_item(\n stream_name,\n \"test_data\",\n \"123\",\n aws_region,\n aws_access_key_id,\n aws_secret_access_key,\n endpoint_url,\n )\n\n end_cmds = [\n \"aws kinesis --profile local delete-stream --stream-name {0} --endpoint-url {1}\".format(\n stream_name, endpoint_url, aws_region\n )\n ]\n for cmd in end_cmds:\n print(cmd)\n os.system(cmd)\n","repo_name":"montblanc18/gps-viewer","sub_path":"device/gpsuploader/tests/test_gpsuploader.py","file_name":"test_gpsuploader.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"22044853186","text":"import uuid\n\nfrom django.contrib.postgres.indexes import GinIndex\nfrom django.contrib.postgres.search import SearchVectorField\nfrom django.db import models\nfrom model_utils.models import TimeStampedModel\nfrom tinymce.models import HTMLField\n\n\nclass Campaign(TimeStampedModel):\n name = models.CharField(max_length=255)\n description = HTMLField(blank=True)\n image = models.ImageField(upload_to=\"campaigns/\", blank=True)\n dm = models.ForeignKey(\n \"users.User\",\n on_delete=models.CASCADE,\n related_name=\"dm_in_campaigns\",\n )\n invite_code = models.UUIDField(\n unique=True, null=True\n ) # Used to add player's to a Campaign.\n vector_column = SearchVectorField(null=True)\n\n def __str__(self) -> str:\n return self.name\n\n def __repr__(self) -> str:\n return f\"
\"\n\n def get_absolute_url(self) -> str:\n from django.urls import reverse\n\n return reverse(\"campaigns:detail\", kwargs={\"campaign_pk\": self.pk})\n\n @staticmethod\n def _generate_invite_code() -> uuid.UUID:\n return uuid.uuid4()\n\n class Meta:\n verbose_name = \"Campaign\"\n verbose_name_plural = \"Campaigns\"\n indexes = (GinIndex(fields=[\"vector_column\"]), models.Index(fields=[\"name\"]))\n\n def save(self, *args, **kwargs) -> None:\n \"\"\"Set the invite code if it doesn't exist yet.\"\"\"\n if not self.pk or not self.invite_code:\n self.invite_code = self._generate_invite_code()\n super().save(*args, **kwargs)\n","repo_name":"wlansu/Campaign-Alchemy","sub_path":"apps/campaigns/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"37705406505","text":"from detector import * \r\nimport cv2 as cv\r\nfrom gtts import gTTS\r\nimport os\r\n\r\nlanguage = 'en'\r\n\r\n \r\ndef textTospeech(text) :\r\n myobj = gTTS(text=text, lang=language, slow=False)\r\n print(text)\r\n myobj.save(\"text.mp3\")\r\n os.system(\"text.mp3\")\r\n\r\n\r\n# imageLists = ['cat-dog','fruits',\"street\",\"pizza\",\"room\"]\r\nimageLists = [\"street\"]\r\n\r\ndetctor = Detector(0)\r\nfor imgName in imageLists :\r\n img = cv.imread(\"images/{}.jpg\".format(imgName))\r\n img,data = detctor.detect(img)\r\n textTospeech(f\"image {imgName} have items {data}\")\r\n # cv.imshow(imgName , img)\r\n \r\n# cv.waitKey()\r\n\r\n\r\n\r\n \r\n\r\n","repo_name":"engabdallahassem/AI-animal-Production-using-ESPCAM","sub_path":"object_detection/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"30879367234","text":"import csv, sys, re\n\n# input error handling; must specify input file number\nif len(sys.argv) is not 2:\n sys.exit('Please specify an input file number (0-9).')\n\n# get file number from input arguments\nfile_num = sys.argv[1]\n\n# increase csv field limit\ncsv.field_size_limit(sys.maxsize)\n\ndef extract_comments(contents, language):\n comments = []\n comment_delims = ['#'] if language == 'py' else ['//', '/*', '*']\n # iterate through lines in file contents, checking for comments\n for line in contents.split('\\n'):\n line_trimmed = line.strip()\n for delim in comment_delims:\n # output the line with the comment deliminator removed\n if line_trimmed.startswith(delim):\n comment = line_trimmed.split(delim)[1]\n # ensure that this comments contains letters\n if re.search('[a-zA-Z]', comment) is not None:\n comments.append(comment.strip())\n return comments\n\nwith open(f'../data/files/file_contents_{file_num}.csv', 'r') as file, \\\n open(f'../data/comments/comments_{file_num}.csv', 'w') as output:\n # null bytes error handling\n r = csv.reader(line.replace('\\0', '') for line in file)\n header = True\n\n # make writer and write header row\n w = csv.writer(output)\n w.writerow(['prog_lang', 'country_code', 'comment'])\n\n for row in r:\n # skip first header row\n if header:\n header = False\n continue\n\n # get programming language from file name\n file_name = row[0]\n language = file_name.split('.')[-1]\n\n # extract comments from file\n contents = row[1]\n comments = extract_comments(contents, language)\n\n # write comments to output csv, specifying programming language and country\n country = row[2]\n for comment in comments:\n w.writerow([language, country, comment])\n","repo_name":"alexwgraves/francais-informatique","sub_path":"analysis/extract_comments.py","file_name":"extract_comments.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"17242495720","text":"from flask import Flask, request, redirect, render_template, session, flash\nfrom flask_sqlalchemy import SQLAlchemy\n\n\napp = Flask(__name__)\napp.config['DEBUG'] = True\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://build-a-blog:root@localhost:8889/build-a-blog'\napp.config['SQLALCHEMY_ECHO'] = True\ndb = SQLAlchemy(app)\napp.secret_key = 'root' #should be a better key for security purposes, but oh well, right?\n\n\nclass Blog(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(120))\n body = db.Column(db.String(1200))\n\n def __init__(self,title,body):\n self.title = title\n self.body = body\n\n\n@app.route('/newpost', methods=['POST', 'GET'])\ndef newpost():\n #Still commits everything to database, rearrange if statements to make this work correctly.\n title_error = \"\"\n body_error = \"\"\n if request.method == 'POST':\n blog_title = request.form['blog_title']\n blog_body = request.form['blog_body']\n if len(blog_title) < 1:\n title_error = \"Please enter a Title for your Blog.\"\n if len(blog_body) < 1:\n body_error = \"Please enter a Body for your Blog.\"\n if not title_error and not body_error:\n new_entry = Blog(blog_title, blog_body)\n db.session.add(new_entry)\n db.session.commit()\n return redirect('/blog?id='+str(new_entry.id)) #Accesses id attribute\n else:\n return render_template('/newpost.html', blog_title=blog_title,\n blog_body=blog_body, title_error=title_error,\n body_error=body_error)\n\n return render_template('newpost.html')\n\n\n\n@app.route('/blog', methods=['POST', 'GET'])\ndef blog():\n id_exists = request.args.get('id')\n if id_exists:\n individual_entry = Blog.query.get(id_exists)\n return render_template('/singlepost.html', individual_entry=individual_entry)\n else:\n entries = Blog.query.all()\n return render_template('blog.html', entries=entries)\n\n\n@app.route('/', methods=['POST', 'GET'])\ndef index():\n entries = Blog.query.all()\n return render_template('blog.html', entries=entries)\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"bdburns6389/build-a-blog","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"18810978317","text":"import sys\r\nsys.setrecursionlimit(10000000)\r\ninput = sys.stdin.readline\r\nN = int(input().rstrip())\r\nw = 0\r\nb = 0\r\ngraph = [list(map(int,input().split()))for _ in range(N)]\r\ndef solution(G):\r\n global w, b\r\n\r\n n = len(G)\r\n if len(G) == 1 and len(G[0]) == 1:\r\n if G[0][0] == 1:\r\n b+=1\r\n else:\r\n w+=1\r\n return\r\n if checkgraph(G):\r\n if G[0][0] == 0:\r\n w+=1\r\n else:\r\n b+=1\r\n return\r\n solution([row[0:n//2] for row in G[0:n//2]])\r\n solution([row[n//2:n] for row in G[0:n//2]])\r\n solution([row[0:n//2] for row in G[n//2:n]])\r\n solution([row[n//2:n] for row in G[n//2:n]])\r\n return\r\n\r\ndef checkgraph(G):\r\n temp = G[0][0]\r\n for i in range(len(G)):\r\n for j in range(len(G[0])):\r\n if G[i][j] != temp:\r\n return False\r\n return True\r\n\r\nsolution(graph)\r\nprint(w)\r\nprint(b)","repo_name":"ukjinlee66/PS","sub_path":"백준/Silver/2630. 색종이 만들기/색종이 만들기.py","file_name":"색종이 만들기.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"2796157290","text":"import ctypes\nimport logging\nimport os\nimport os.path\nfrom typing import Optional, Any, List\n\nimport cv2\nimport numpy as np\nfrom PyQt5.Qt import Qt\nfrom PyQt5.QtCore import QPoint, QTimer\nfrom PyQt5.QtGui import QImage, QPainter\nfrom PyQt5.QtWidgets import QMainWindow, QDesktopWidget\nfrom desktopmagic.screengrab_win32 import getRectAsImage\nfrom win32gui import GetWindowText, GetForegroundWindow, GetClientRect, ClientToScreen, FindWindow\n\nfrom hotsdraft_overlay.models import Rect, Point\nfrom hotsdraft_overlay.painting import PaintCommand\n\n\nclass BaseCanvas(QMainWindow):\n def __init__(self):\n ctypes.windll.user32.SetProcessDPIAware()\n super().__init__()\n self.__paint_commands = []\n self.init()\n self.showMaximized()\n self.activateWindow()\n\n def init(self):\n raise NotImplemented()\n\n def capture(self):\n raise NotImplemented()\n\n def execute_paint_commands(self, paint_commands: List[PaintCommand]):\n self.__paint_commands = paint_commands\n self.repaint()\n\n def clear_paint_commands(self):\n self.__paint_commands = []\n self.repaint()\n\n def paintEvent(self, e):\n painter = QPainter(self)\n for command in self.__paint_commands:\n painter.save()\n command.paint(painter)\n painter.restore()\n super().paintEvent(e)\n\n\nclass WindowCanvas(BaseCanvas):\n def __init__(self, window_name):\n super().__init__()\n self.__window_name = window_name\n self.__last_rect = None\n\n def init(self):\n self.setAttribute(Qt.WA_TranslucentBackground)\n self.setAttribute(Qt.WA_TransparentForMouseEvents)\n self.setWindowFlag(Qt.WindowStaysOnTopHint)\n self.setWindowFlag(Qt.FramelessWindowHint)\n self.setWindowFlag(Qt.Tool)\n\n def capture(self) -> Optional[Any]:\n hwnd = GetForegroundWindow()\n if hwnd:\n if GetWindowText(hwnd) == self.__window_name:\n rect = self.__get_handle_rect(hwnd)\n self.__maybe_align(rect)\n screen_shot = getRectAsImage(rect.tuple)\n return cv2.cvtColor(np.array(screen_shot), cv2.COLOR_RGB2BGR)\n return None\n\n def paintEvent(self, e):\n self.__align_to_target_window()\n super().paintEvent(e)\n\n def __align_to_target_window(self):\n hwnd = FindWindow(0, self.__window_name)\n if hwnd:\n rect = self.__get_handle_rect(hwnd)\n self.__maybe_align(rect)\n\n def __maybe_align(self, rect: Rect):\n if rect == self.__last_rect:\n return\n\n self.__last_rect = rect\n # Run in UI thread\n QTimer.singleShot(0, self.__adjust_geometry)\n\n def __adjust_geometry(self):\n logging.debug(\"Moving overlay to %s\" % self.__last_rect)\n self.setGeometry(self.__last_rect.qrect)\n self.updateGeometry()\n\n @staticmethod\n def __get_handle_rect(hwnd) -> Rect:\n # Get the size of the rectangle\n x, y, x1, y1 = GetClientRect(hwnd)\n # Get the position of the rectangle top corner on screen.\n x, y = ClientToScreen(hwnd, (x, y))\n # Move the bottom right corner by the offset\n x1 += x\n y1 += y\n return Rect(Point(x, y), Point(x1, y1))\n\n\nclass ScreenshotCanvas(BaseCanvas):\n def __init__(self, directory):\n self.__desktop_size = QDesktopWidget().screenGeometry().size()\n super().__init__()\n self.__screenshots = [\n os.path.join(directory, file)\n for file in os.listdir(directory)\n if os.path.isfile(os.path.join(directory, file))\n ]\n self.__current_image = None\n\n def init(self):\n self.setWindowTitle(\"Screenshot preview\")\n self.setBaseSize(self.__desktop_size)\n\n def capture(self) -> Optional[Any]:\n if not self.__screenshots:\n self.__current_image = None\n self.__set_title(\"No more images left\")\n else:\n path = 
self.__screenshots.pop(0)\n logging.debug(\"Providing screenshot %s\", path)\n self.__set_title(\"Preview %s\" % path)\n cv_image = cv2.imread(path)\n\n img_h, img_w = cv_image.shape[:2]\n window_h, window_w = self.size().height(), self.size().width()\n\n biggest_ratio = max(float(img_h) / window_h, float(img_w) / window_w)\n if biggest_ratio > 1:\n cv_image = cv2.resize(cv_image, None, fx=1.0 / biggest_ratio, fy=1.0 / biggest_ratio,\n interpolation=cv2.INTER_AREA)\n\n self.__current_image = cv_image\n self.repaint()\n\n return self.__current_image\n\n def __set_title(self, title):\n # Needs to run on UI thread\n QTimer.singleShot(0, lambda t=title: self.setWindowTitle(t))\n\n def paintEvent(self, e):\n if self.__current_image is not None:\n img = self.__current_image\n painter = QPainter(self)\n qimg = QImage(img, img.shape[1], img.shape[0], img.shape[1] * 3, QImage.Format_RGB888).rgbSwapped()\n painter.drawImage(QPoint(0, 0), qimg)\n super().paintEvent(e)\n","repo_name":"AudriusButkevicius/hotsdraft-overlay","sub_path":"hotsdraft_overlay/canvas.py","file_name":"canvas.py","file_ext":"py","file_size_in_byte":5173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"16831684210","text":"from bs4 import BeautifulSoup\nimport pandas as pd\nimport numpy as np\nimport requests\nimport json\nimport nltk\nimport re\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\n\nlinks = pd.read_excel(\"C:\\\\Users\\\\Manish\\\\OneDrive\\\\Desktop\\\\Blackcoffer\\\\Input.xlsx\")\nlinks.head()\n\nstop_words = []\nwith open('C:\\\\Users\\\\Manish\\\\OneDrive\\\\Desktop\\\\Blackcoffer\\\\StopWords_Auditor.txt') as f:\n content = f.read().strip()\n stop_words += content.split(\"\\n\")\n\nwith open('C:\\\\Users\\\\Manish\\\\OneDrive\\\\Desktop\\\\Blackcoffer\\\\StopWords_Currencies.txt') as f:\n content = f.read().strip()\n stop_words += content.split(\"\\n\")\n\nwith open('C:\\\\Users\\\\Manish\\\\OneDrive\\\\Desktop\\\\Blackcoffer\\\\StopWords_DatesandNumbers.txt') as f:\n content = f.read().strip()\n stop_words += content.split(\"\\n\")\n\nwith open('C:\\\\Users\\\\Manish\\\\OneDrive\\\\Desktop\\\\Blackcoffer\\\\StopWords_Generic.txt') as f:\n content = f.read().strip()\n stop_words += content.split(\"\\n\")\n\nwith open('C:\\\\Users\\\\Manish\\\\OneDrive\\\\Desktop\\\\Blackcoffer\\\\StopWords_GenericLong.txt') as f:\n content = f.read().strip()\n stop_words += content.split(\"\\n\")\n\nwith open('C:\\\\Users\\\\Manish\\\\OneDrive\\\\Desktop\\\\Blackcoffer\\\\StopWords_Geographic.txt') as f:\n content = f.read().strip()\n stop_words += content.split(\"\\n\")\n\nwith open('C:\\\\Users\\\\Manish\\\\OneDrive\\\\Desktop\\\\Blackcoffer\\\\StopWords_Names.txt') as f:\n content = f.read().strip()\n stop_words += content.split(\"\\n\")\n\nstop_words = [i.lower() for i in stop_words]\n\npositive_words = []\nwith open('C:\\\\Users\\\\Manish\\\\OneDrive\\\\Desktop\\\\Blackcoffer\\\\positive-words.txt') as f:\n content = f.read().strip()\n positive_words += content.split(\"\\n\")\n\nnegative_words = []\nwith open('C:\\\\Users\\\\Manish\\\\OneDrive\\\\Desktop\\\\Blackcoffer\\\\negative-words.txt') as f:\n content = f.read().strip()\n negative_words += content.split(\"\\n\")\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0',\n}\n\nprint(len(links.URL))\n\noutput_df = pd.DataFrame(links)\nprint(output_df)\nPOSITIVESCORE = []\nNEGATIVESCORE = []\nPOLARITYSCORE = []\nSUBJECTIVITYSCORE = []\nAVGSENTENCELENGTH = []\nPERCENTAGEOFCOMPLEXWORDS = []\nFOGINDEX = []\nAVGNUMBEROFWORDSPERSENTENCE = []\nCOMPLEXWORDCOUNT = []\nWORDCOUNT = []\nSYLLABLEPERWORD = []\nPERSONALPRONOUNS = []\nAVGWORDLENGTH = []\n\n\ndef count_syllables(word):\n return len(re.findall('(?!e$)[aeiouy]+', word, re.I) + re.findall('^[^aeiouy]*e$', word, re.I))\n\n\ndef load_data(url, url_id):\n try:\n\n cmc = requests.get(url, headers=headers)\n soup = BeautifulSoup(cmc.content, 'html.parser')\n title_tag = soup.find(\"h1\", attrs={\"class\": 'entry-title'})\n article_tag = soup.find('div', {'class': 'td-post-content'})\n title = title_tag.get_text()\n article_p = article_tag.find_all(\"p\")\n article = \" \".join([p.get_text() for p in article_p])\n\n title_text = title + \" \" + article\n\n cleaned = []\n txt_file = open(f'Extracted Data/{url_id}.txt', 'w+', encoding=\"utf-8\")\n txt_file.write(title)\n txt_file.write(article)\n txt_file.close()\n\n [i if i.lower() in stop_words else cleaned.append(i) for i in word_tokenize(title_text)]\n\n cleaned_n = []\n stops = set(stopwords.words('english'))\n for i in word_tokenize(title_text):\n if i not in stops:\n cleaned_n.append(i)\n\n positive_score = 0\n negative_score = 0\n 
total_words = len(word_tokenize(title_text))\n Total_Words_after_cleaning = len(cleaned)\n sentences = nltk.sent_tokenize(title_text)\n total_sentences = len(sentences)\n\n complex_words = []\n for i in word_tokenize(title_text):\n if count_syllables(i.lower()) > 1:\n complex_words.append(i)\n complex_word_count = len(complex_words)\n\n for i in cleaned:\n if i.lower() in positive_words:\n positive_score += 1\n\n if i.lower() in negative_words:\n negative_score += 1\n POSITIVESCORE.append(positive_score)\n NEGATIVESCORE.append(negative_score)\n\n polarity_score = (positive_score - negative_score) / ((positive_score + negative_score) + 0.000001)\n POLARITYSCORE.append(polarity_score)\n\n subjectivity_score = (polarity_score + negative_score) / ((Total_Words_after_cleaning) + 0.000001)\n SUBJECTIVITYSCORE.append(subjectivity_score)\n\n average_sentence_length = total_words / total_sentences\n AVGSENTENCELENGTH.append(average_sentence_length)\n\n percentage_of_complex_words = complex_word_count / total_words\n PERCENTAGEOFCOMPLEXWORDS.append(percentage_of_complex_words)\n\n Fog_Index = 0.4 * (average_sentence_length + percentage_of_complex_words)\n FOGINDEX.append(Fog_Index)\n\n average_number_of_words_per_sentence = average_sentence_length\n AVGNUMBEROFWORDSPERSENTENCE.append(average_number_of_words_per_sentence)\n\n COMPLEXWORDCOUNT.append(complex_word_count)\n\n word_count_list = []\n\n punc = '''!()-[]{};:'\"\\,<>./?@#$%^&*_~'''\n for i in range(len(cleaned_n)):\n if cleaned_n[i] not in punc:\n word_count_list.append(cleaned_n[i])\n word_count = len(word_count_list)\n WORDCOUNT.append(word_count)\n\n total_syllable = 0\n for i in word_tokenize(title_text):\n total_syllable += count_syllables(i)\n\n SYLLABLEPERWORD.append(total_syllable)\n\n pronouns = [\"I\", \"i\", \"We\", \"we\", \"My\", \"my\", \"Ours\", \"ours\", \"Us\", \"us\"]\n pronouns_count = 0\n\n for i in pronouns:\n match = re.findall(i, title_text)\n pronouns_count += len(match)\n\n PERSONALPRONOUNS.append(pronouns_count)\n\n total_char = re.findall(\"\\w\", title_text)\n average_word_length = len(total_char) / len(word_tokenize(title_text))\n AVGWORDLENGTH.append(average_word_length)\n print(\n positive_score,\n negative_score,\n polarity_score,\n subjectivity_score,\n average_sentence_length,\n percentage_of_complex_words,\n Fog_Index,\n average_number_of_words_per_sentence,\n complex_word_count,\n word_count,\n total_syllable,\n pronouns_count,\n average_word_length,\n )\n except:\n POSITIVESCORE.append(0)\n NEGATIVESCORE.append(0)\n POLARITYSCORE.append(0)\n SUBJECTIVITYSCORE.append(0)\n AVGSENTENCELENGTH.append(0)\n PERCENTAGEOFCOMPLEXWORDS.append(0)\n FOGINDEX.append(0)\n AVGNUMBEROFWORDSPERSENTENCE.append(0)\n COMPLEXWORDCOUNT.append(0)\n WORDCOUNT.append(0)\n SYLLABLEPERWORD.append(0)\n PERSONALPRONOUNS.append(0)\n AVGWORDLENGTH.append(0)\n\n\n[load_data(links.URL[i], links.URL_ID[i]) for i in range(len(links))]\n\nprint(\n len(POSITIVESCORE),\n len(NEGATIVESCORE),\n len(POLARITYSCORE),\n len(SUBJECTIVITYSCORE),\n len(AVGSENTENCELENGTH),\n len(PERCENTAGEOFCOMPLEXWORDS),\n len(FOGINDEX),\n len(AVGNUMBEROFWORDSPERSENTENCE),\n len(COMPLEXWORDCOUNT),\n len(WORDCOUNT),\n len(SYLLABLEPERWORD),\n len(PERSONALPRONOUNS),\n len(AVGWORDLENGTH),\n)\n\noutput_df['POSITIVE SCORE'] = POSITIVESCORE\noutput_df['NEGATIVE SCORE'] = NEGATIVESCORE\noutput_df['POLARITY SCORE'] = POLARITYSCORE\noutput_df['SUBJECTIVITY SCORE'] = SUBJECTIVITYSCORE\noutput_df['AVG SENTENCE LENGTH'] = AVGSENTENCELENGTH\noutput_df['PERCENTAGE OF COMPLEX WORDS'] 
= PERCENTAGEOFCOMPLEXWORDS\noutput_df['FOG INDEX'] = FOGINDEX\noutput_df['AVG NUMBER OF WORDS PER SENTENCE'] = AVGNUMBEROFWORDSPERSENTENCE\noutput_df['COMPLEX WORD COUNT'] = COMPLEXWORDCOUNT\noutput_df['WORD COUNT'] = WORDCOUNT\noutput_df['SYLLABLE PER WORD'] = SYLLABLEPERWORD\noutput_df['PERSONAL PRONOUNS'] = PERSONALPRONOUNS\noutput_df['AVG WORD LENGTH'] = AVGWORDLENGTH\n\noutput_df.to_csv(\"output.csv\", index=False)","repo_name":"Nitish-Kumar-Bote/Data-Science","sub_path":"python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":7983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
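For a sense of scale on the readability formulas in the record above, a worked computation with hypothetical counts (500 words, 25 sentences, 90 complex words):

total_words = 500          # hypothetical article
total_sentences = 25
complex_word_count = 90

average_sentence_length = total_words / total_sentences          # 20.0
percentage_of_complex_words = complex_word_count / total_words   # 0.18 (a fraction, despite the name)
fog_index = 0.4 * (average_sentence_length + percentage_of_complex_words)
print(fog_index)  # 8.072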
+{"seq_id":"37382611889","text":"import requests\nimport asyncio\nimport multiprocessing.dummy as mpd\n\nthrottled = False\nURL = \"https://instructor-warn.herokuapp.com/warn\"\n\npool = mpd.Pool(10)\n\nasync def warningPing():\n global throttled\n global URL\n\n if not throttled:\n try:\n pool.apply_async(requests.get, [ URL ])\n # requests.get(url)\n throttled = True\n await asyncio.sleep(10)\n except:\n print(\"Unable to ping\")\n finally:\n throttled = False","repo_name":"dwvn03/instructor_warn","sub_path":"py_client/adapter.py","file_name":"adapter.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"9474155099","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 2 17:30:47 2018\n\n@author: malrawi\n\"\"\"\n\n\"\"\"\ndataset fusion based on:\n https://github.com/xingyizhou/pytorch-pose-hg-3d/blob/master/src/datasets/fusion.py \n\"\"\"\n\nimport torch.utils.data as data\nfrom datasets.load_ifnenit_dataset import IfnEnitDataset\nfrom datasets.load_iam_dataset import IAM_words\nfrom datasets.load_iam_train_valid_dataset import iam_train_valid_combined_dataset\nimport numpy as np\n\n'''\n- Args\nFirst, load the train set by the default values, except (train and transform), \nthis wil result in train_set, where the idx's are stored for each IFN and WG, \nthen, to load the test set, these idx's are estimated randomly according to \ncf.split_percentage in config_file_wg.py. \n We then can use these idx's (train_set.data_idx_WG,) to load the complement data and get \nthe test set for each of IFN and IAM.\n\nExample:\ntrain_set = IAM_IFN_Dataset(cf, train=True, mode='train', transform=image_transfrom)\ntest_set = IAM_IFN_Dataset(cf, train=False, mode='test or validate' transform=image_transfrom, \n data_idx_WG = train_set.data_idx_WG, \n data_idx_IAM = train_set.data_idx_IFN, \n complement_idx = True)\n \n==\n'''\nclass IAM_IFN_Dataset(data.Dataset):\n def __init__(self, cf, train=True, mode = 'train', transform=None, \n data_idx_IAM = np.arange(1), \n data_idx_IFN = np.arange(1), \n complement_idx=False):\n self.train = train # training set or test set \n self.mode = mode\n if len(data_idx_IFN)==1:\n self.datasetIFN = IfnEnitDataset(cf, train=self.train, transform = transform)\n else:\n self.datasetIFN = IfnEnitDataset(cf, train=self.train, transform = transform,\n data_idx = data_idx_IFN, complement_idx = True)\n if len(data_idx_IAM)==1: \n if mode == 'train':\n self.datasetIAM = iam_train_valid_combined_dataset(cf, train=True, transform = transform) # mode is one of train, test, or validate \n else: \n assert(mode == 'test')\n self.datasetIAM = IAM_words(cf, mode = self.mode, transform = transform) \n \n else:\n # this is deprecated for IAM dataset as we are splitting based on train, validate, and test folders\n # self.datasetIAM = IAM_words(cf, mode = self.mode, transform = transform)\n print('Deprecated by Rawi, as the split is based on train, validate and test')\n\n self.data_idx_IFN = self.datasetIFN.data_idx # this is needed, to be passed from one set to another\n # self.data_idx_IAM = self.datasetIAM.data_idx # this is needed, to be passed from one set to another\n \n def add_weights_of_words(self): # weights to balance the loss, if the data is unbalanced \n self.datasetIFN.add_weights_of_words()\n self.datasetIAM.add_weights_of_words()\n\n def num_classes(self):\n return self.datasetIAM.num_classes() #IFN and WG have the same phoc size\n\n def __getitem__(self, index):\n \n if index < len(self.datasetIFN):\n return self.datasetIFN[index]\n else:\n return self.datasetIAM[index - len(self.datasetIFN)] # check: are we skipping a sample here?\n\n def __len__(self):\n return len(self.datasetIFN) + len(self.datasetIAM)\n","repo_name":"morawi/MLPHOC","sub_path":"datasets/load_IAM_IFN_dataset.py","file_name":"load_IAM_IFN_dataset.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"}
+{"seq_id":"8458838809","text":"from babel.dates import format_datetime\nfrom google.appengine.ext import deferred\nfrom pyquery import PyQuery\nimport mock\nimport unittest2\n\nfrom library import constants\nfrom library.constants import email\nfrom forms import error_messages\nfrom library import testing\nfrom models.funding_source import FundingSource\nfrom models.profile import Profile\nfrom models.transaction import Transaction\n\n\nclass TestTransactionHandler(testing.TestCase, unittest2.TestCase):\n\n def test_view_add_credit_page_not_logged_in(self):\n self.assertNotLoggedIn()\n response = self.app.get(self.uri_for('transaction.add_credit'))\n redirect_url = self.uri_for(\n 'login', redirect=self.uri_for('transaction.add_credit'))\n self.assertRedirects(response, redirect_url)\n\n @testing.logged_in\n def test_view_add_credit_page_logged_in(self):\n response = self.app.get(self.uri_for('transaction.add_credit'))\n self.assertOk(response)\n self.assertLength(1, response.pyquery('table.funding-sources'))\n # Check that add credit button is present.\n self.assertLength(1, response.pyquery('.span12 button'))\n self.assertTemplateUsed('transaction_add_credit.haml')\n\n @testing.logged_in\n def test_add_credit_transaction_added_to_queue(self):\n funding_source = self.create_funding_source(\n parent=self.get_current_profile(),\n status=FundingSource.Status.Accepted)\n\n params = {'amount': '20.00', 'funding_source': str(funding_source.key())}\n response = self.app.post(self.uri_for('transaction.add_credit'), params)\n self.assertRedirects(response, self.uri_for('home'))\n\n # Check that the tranasction processing job has been added to the queue.\n tasks = self.taskqueue_stub.get_filtered_tasks(\n url=self.uri_for('transaction.process'))\n self.assertLength(1, tasks)\n\n # Check that the transaction has been created.\n self.assertLength(1, Transaction.all())\n\n # Check that the transaction has a task queued to process it.\n task, = tasks\n params = task.extract_params()\n transaction = Transaction.all().get()\n self.assertEqual(str(transaction.key()), params['transaction_key'])\n self.assertEqual(self.uri_for('transaction.process'), task.url)\n\n # Check that the transaction starts with the correct status.\n self.assertEqual(transaction.Status.Pending, transaction.status)\n\n # Run the transaction processing job and verify that it is successful.\n with mock.patch('stripe.Charge.create') as stripe_charge:\n stripe_charge.return_value = mock.Mock(id='ch_1stsWjqBqYSOtr')\n response = self.app.post(task.url, params,\n headers=self.TASKQUEUE_HEADERS)\n self.assertOk(response)\n\n # Reload transaction from the data store and ensure the status changed.\n transaction, = Transaction.all()\n self.assertEqual(transaction.Status.Completed, transaction.status)\n\n # Reload the profile from the data store and check that the balance has\n # been updated.\n profile = Profile.get(self.current_profile.key())\n self.assertEqual(2000, profile.usd_balance)\n\n def test_add_credit_not_logged_in(self):\n params = {'amount': '20.00', 'funding_source': 'Fake'}\n response = self.app.post(self.uri_for('transaction.add_credit'),\n params)\n\n redirect_url = self.uri_for('transaction.add_credit')\n self.assertRedirects(response,\n self.uri_for('login', redirect=redirect_url))\n\n def test_invalid_transaction_key_logs_error(self):\n with mock.patch('logging.error') as logging_error:\n params = {'transaction_key': 'BAD KEY'}\n response = self.app.post(self.uri_for('transaction.process'),\n params, 
headers=self.TASKQUEUE_HEADERS)\n self.assertOk(response)\n self.assertTrue(logging_error.called)\n self.assertEqual(1, logging_error.call_count)\n\n @testing.logged_in\n def test_add_credit_fails_without_a_funding_source(self):\n params = {'amount': 20.00, 'funding_source': ''}\n response = self.app.post(self.uri_for('transaction.add_credit'), params)\n self.assertOk(response)\n self.assertFlashMessage(message=error_messages.FUNDING_SOURCE_REQUIRED,\n level='error', response=response)\n\n # Ensure no transactions are created.\n self.assertLength(0, Transaction.all())\n\n @testing.logged_in\n def test_email_is_sent_after_successful_deposit(self):\n profile = self.get_current_profile()\n funding_source = self.create_funding_source(\n parent=profile, status=FundingSource.Status.Accepted)\n\n params = {'amount': '20.00', 'funding_source': str(funding_source.key())}\n response = self.app.post(self.uri_for('transaction.add_credit'), params)\n self.assertRedirects(response, self.uri_for('home'))\n\n # Check that the tranasction processing job has been added to the queue.\n tasks = self.taskqueue_stub.get_filtered_tasks(\n url=self.uri_for('transaction.process'))\n self.assertLength(1, tasks)\n\n # Check that the transaction has a task queued to process it.\n task, = tasks\n params = task.extract_params()\n transaction = Transaction.all().get()\n self.assertEqual(str(transaction.key()), params['transaction_key'])\n self.assertEqual(self.uri_for('transaction.process'), task.url)\n\n # Run the transaction processing job and verify that it is successful.\n with mock.patch('stripe.Charge.create') as stripe_charge:\n stripe_charge.return_value = mock.Mock(id='ch_1stsWjqBqYSOtr')\n response = self.app.post(task.url, params,\n headers=self.TASKQUEUE_HEADERS)\n self.assertOk(response)\n\n # Check that a mail-sending task is in the queue.\n tasks = self.taskqueue_stub.get_filtered_tasks(queue_names='mail')\n self.assertLength(1, tasks)\n\n # Run the task (it should be a deferred call) and check that an e-mail\n # is sent.\n task, = tasks\n deferred.run(task.payload)\n messages = self.mail_stub.get_sent_messages()\n self.assertLength(1, messages)\n\n # Verify that the e-mail sent has the right information.\n profile = self.get_current_profile()\n message, = messages\n self.assertEqual('\"%s\" <%s>' % (profile.name, profile.email), message.to)\n self.assertEqual(email.DEPOSIT_SUBJECT, message.subject)\n self.assertEqual(constants.FULL_NO_REPLY_EMAIL, message.sender)\n self.assertEqual(constants.FULL_SUPPORT_EMAIL, message.reply_to)\n self.assertTemplateUsed('emails/transaction.haml')\n\n email_greeting = PyQuery(message.html.decode())('h3.email_greeting')\n self.assertEqual('Hi %s,' % profile.name, email_greeting.text())\n\n email_message = PyQuery(message.html.decode())('p.email_message')\n self.assertEqual(email.DEPOSIT_MESSAGE, email_message.text())\n\n email_table = PyQuery(message.html.decode())('tbody td')\n self.assertEqual(format_datetime(transaction.get_recipient_time()),\n email_table.filter('.email_time').text())\n self.assertEqual(transaction.get_transaction_amount(),\n format(email_table.filter('.email_amount').text()))\n self.assertEqual(Transaction.Status.Completed,\n email_table.filter('.email_status').text())\n\n @testing.logged_in\n def test_email_is_not_sent_after_unsuccessful_deposit(self):\n params = {'amount': 20.00, 'funding_source': ''}\n response = self.app.post(self.uri_for('transaction.add_credit'), params)\n self.assertOk(response)\n\n # Ensure no transactions are created and no 
mail sending task is in queue.\n self.assertLength(0, Transaction.all())\n tasks = self.taskqueue_stub.get_filtered_tasks(queue_names='mail')\n self.assertLength(0, tasks)\n\n @testing.logged_in\n def test_add_credit_fails_with_invalid_funding_source(self):\n params = {'amount': 20.00, 'funding_source': 'INVALID'}\n response = self.app.post(self.uri_for('transaction.add_credit'), params)\n self.assertFlashMessage(message=error_messages.FUNDING_SOURCE_NOT_FOUND,\n level='error', response=response)\n self.assertLength(0, Transaction.all())\n self.assertEqual(0, self.get_current_profile().usd_balance)\n\n @testing.logged_in\n def test_add_credit_fails_with_unauthorized_funding_source(self):\n funding_source = self.create_funding_source()\n params = {'amount': 20, 'funding_source': str(funding_source.key())}\n response = self.app.post(self.uri_for('transaction.add_credit'), params)\n self.assertFlashMessage(message=error_messages.UNAUTHORIZED_FUNDING_SOURCE,\n level='error', response=response)\n self.assertLength(0, Transaction.all())\n self.assertEqual(0, self.get_current_profile().usd_balance)\n","repo_name":"kleyow/webapp2_boilerplate","sub_path":"handlers/tests/test_transaction_add_credit.py","file_name":"test_transaction_add_credit.py","file_ext":"py","file_size_in_byte":8676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"20039719119","text":"from process import *\nfrom create import *\nimport warnings\nimport pandas as pd\n\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\ncolumn_names = ['Average queue', 'Average load', 'Average task rate']\n\n\nclass Model:\n general_mean_load = 0\n general_mean_queue = 0\n Result_table = pd.DataFrame(columns=column_names)\n\n def __init__(self, elements_list):\n self.element_list = elements_list\n self.t_next = 0\n self.event = 0\n self.t_curr = 0\n self.stable = []\n self.mean_length_of_queue_list = list()\n self.mean_load_list = list()\n self.rate_of_completed_tasks_list = list()\n self.directive_fail_list = list()\n self.general_mean_load, self.general_mean_queue = 0.0, 0.0\n\n def simulate(self, time, flag):\n while self.t_curr < time:\n self.t_next = np.inf\n for element in self.element_list:\n t_next_val = np.min(element.t_next)\n if t_next_val < self.t_next:\n self.t_next = t_next_val\n self.event = element.id_element\n\n if flag is True:\n print(\"*************************************\")\n print(f\"Event name: {self.element_list[self.event].name}, time = {self.t_next}\")\n print(\"*************************************\")\n\n for element in self.element_list:\n element.statistics(self.t_next - self.t_curr)\n\n self.t_curr = self.t_next\n for element in self.element_list:\n element.t_curr = self.t_curr\n\n if len(self.element_list) > self.event:\n self.element_list[self.event].out_act()\n\n for element in self.element_list:\n if self.t_curr in element.t_next:\n element.out_act()\n\n if len(self.element_list) > self.event and self.element_list[self.event].id_element == 0:\n self.element_list[self.event].print_event_info(flag)\n self.print_info(flag)\n\n self.print_result(flag)\n self.experiments()\n\n def print_info(self, flag):\n if flag is True:\n for element in self.element_list:\n element.print_info()\n\n def print_result(self, flag):\n income_task = 0\n if flag is True:\n print(\"\\n*****************_RESULTS_*****************\")\n for element in self.element_list:\n element.result()\n if isinstance(element, Create):\n income_task = element.quantity\n elif isinstance(element, Process):\n e = element\n self.general_mean_load += e.mean_load / self.t_curr\n self.general_mean_queue += e.mean_queue / self.t_curr\n print(f\"Mean length of queue: {e.mean_queue / self.t_curr}\")\n self.mean_length_of_queue_list.append(e.mean_queue / self.t_curr)\n print(f\"Max queue: {e.count_max_queue}\")\n print(f\"Mean load: {e.mean_load / self.t_next}\")\n self.mean_load_list.append(e.mean_load / self.t_next)\n print(f\"Tasks that failed directive term: {e.directive_fail}\")\n self.directive_fail_list.append(e.directive_fail)\n print(f\"Rate of completed tasks: {e.quantity / income_task}\")\n self.rate_of_completed_tasks_list.append(e.quantity / income_task)\n print()\n print()\n\n def experiments(self):\n avg_mean_load, avg_mean_queue = 0.0, 0.0\n income, done = 0, 0\n for e in self.element_list:\n if isinstance(e, Create):\n income = e.quantity\n elif isinstance(e, Process):\n avg_mean_load += e.mean_load / self.t_curr\n avg_mean_queue += e.mean_queue / self.t_curr\n if e.id_element == 3:\n done = e.quantity\n Model.general_mean_load = avg_mean_load / 3\n Model.general_mean_queue = avg_mean_queue / 3\n Model.rate_task = done / income\n result = pd.DataFrame([[Model.general_mean_queue, Model.general_mean_load, Model.rate_task]],\n columns=column_names)\n Model.Result_table = Model.Result_table.append(result, 
ignore_index=True)\n","repo_name":"Agupnik/system_modeling_kpi_fict","sub_path":"Course_work/Program_Realization/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
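The selection step at the top of `simulate` above is the classic discrete-event kernel: scan every element's advertised next-event time, take the minimum, and jump the clock there. A self-contained illustration of just that step (toy times; in the repo the real values come from the Create/Process elements):

import numpy as np

# toy next-event times, one array per element (an element may track several)
t_next_per_element = [np.array([5.0]), np.array([2.5, 7.0]), np.array([4.1])]

t_next, event = np.inf, None
for idx, times in enumerate(t_next_per_element):
    t_min = np.min(times)
    if t_min < t_next:
        t_next, event = t_min, idx

print(event, t_next)  # -> 1 2.5: element 1 fires next, so the clock advances to 2.5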
+{"seq_id":"441323219","text":"from kazoo.client import KazooClient\nfrom kazoo.exceptions import ZookeeperError\nimport os\nimport pytest\n\n\nZK_INSTANCE = 0\n\n\n@pytest.fixture\ndef zk():\n zk = KazooClient('localhost:{}'.format(os.getenv('RECIPE_ZOOKEEPER_PORT')))\n\n global ZK_INSTANCE\n zk_prefix = '/{}'.format(ZK_INSTANCE)\n ZK_INSTANCE += 1\n\n try:\n zk.start()\n zk.ensure_path(zk_prefix)\n zk.chroot = zk_prefix\n yield zk\n finally:\n try:\n zk.chroot = '/'\n zk.delete(zk_prefix, recursive=True)\n zk.stop()\n except ZookeeperError:\n pass\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"market/tests/reusable_zk.py","file_name":"reusable_zk.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"3811166914","text":"from random import randint\nprint(\"=-=\"*10)\nprint(\"VAMOS JOGAR PAR OU IMPAR ?\")\nprint(\"=-=\"*10)\nvitoria = 0\nwhile True:\n jog = int(input(\"Diga um valor? \"))\n cpu = randint(0, 10)\n result = jog + cpu\n escolha = \" \"\n while escolha not in \"PI\":\n escolha = str(input(\"Voce escolhe PAR ou IMPAR [P/I] ?\")).strip().upper()[0]\n print(f\"Voce jogou {jog} e o computador jogou {cpu}. Total é {result}.\")\n print(\"DEU PAR.\" if result % 2 == 0 else \"DEU IMPAR.\")\n if escolha == \"P\":\n if result % 2 == 0:\n print(\"Voce Venceu! Parabens!\")\n vitoria += 1\n else:\n print(\"Voce Perdeu!\")\n break\n elif escolha == \"I\":\n if result % 2 == 1:\n print(\"Voce Venceu! Parabens!\")\n vitoria += 1\n else:\n print(\"Você Perdeu!\")\n break\n print(\"Vamos jogar novamente!\")\nprint(f\"Voce venceu {vitoria} partida(s) seguidas!\")","repo_name":"MuriloAsurara88/aprendendo-python","sub_path":"ex068.py","file_name":"ex068.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"28507231174","text":"import numpy as np\n\nfrom ..AShape import AShape\nfrom ..backend import Kernel\nfrom ..HKernel import HKernel\nfrom ..info import BroadcastInfo\nfrom ..SCacheton import SCacheton\nfrom ..Tensor import Tensor\n\ndef remap (input_t : Tensor, coords_t : Tensor, dtype=None) -> Tensor:\n \"\"\"\n remap input_t in spatial axes using coords_t\n\n arguments\n\n input_t Tensor( ...,IH,IW )\n\n coords_t Tensor( ...,OH,OW,D )\n OH - output height\n OW - output width\n D is (2)[x,y] coords\n\n dtype\n\n ...-head part of shapes will be broadcasted to each other\n \"\"\"\n\n op = SCacheton.get(_RemapOp, input_t.shape, input_t.dtype, coords_t.shape, coords_t.dtype, dtype)\n\n output_t = Tensor( op.o_shape, op.o_dtype, device=input_t.get_device() )\n\n input_t.get_device().run_kernel(op.forward_krn, output_t.get_buffer(), input_t.get_buffer(), coords_t.get_buffer())\n\n return output_t\n\n\nclass _RemapOp():\n def __init__(self, i_shape : AShape, i_dtype, c_shape : AShape, c_dtype, o_dtype):\n if np.dtype(i_dtype).type == np.bool_:\n raise ValueError('np.bool_ dtype of i_dtype is not supported.')\n if np.dtype(c_dtype).type == np.bool_:\n raise ValueError('np.bool_ dtype of c_dtype is not supported.')\n if i_shape.ndim < 2:\n raise ValueError('i_shape.ndim must be >= 2 (...,H,W)')\n if c_shape.ndim < 3:\n raise ValueError(f'Coords shape ndim must be >= 3(...,H,W,D)')\n if c_shape[-1] != 2:\n raise ValueError('Last coords dim must be == 2 (x,y)')\n\n self.o_dtype = o_dtype = o_dtype if o_dtype is not None else i_dtype\n\n if i_shape.ndim == 2 and c_shape.ndim == 3:\n # nothing to broadcast\n\n i_br_shape = i_shape\n c_br_shape = c_shape\n\n o_shape = c_shape[-3:-1]\n else:\n op = BroadcastInfo([ i_shape[:-2], c_shape[:-3] ])\n\n i_br_shape = op.br_shapes[0] + i_shape[-2:]\n c_br_shape = op.br_shapes[1] + c_shape[-3:]\n\n o_shape = op.o_shape + c_shape[-3:-1]\n\n self.o_shape = o_shape\n\n self.forward_krn = Kernel(global_shape=(o_shape.size,), kernel_text=f\"\"\"\n\n{HKernel.define_tensor('O', o_shape, o_dtype)}\n{HKernel.define_tensor('I', i_br_shape, i_dtype)}\n{HKernel.define_tensor('C', c_br_shape[:-1], c_dtype)}\n\n__kernel void impl(__global O_PTR_TYPE* O_PTR_NAME, __global const I_PTR_TYPE* I_PTR_NAME, __global const C_PTR_TYPE2* C_PTR_NAME)\n{{\n size_t gid = get_global_id(0);\n\n {HKernel.decompose_idx_to_axes_idxs('gid', 'o', o_shape.ndim)}\n\n C_TYPE2 c_value = C_GLOBAL_LOAD2(C_IDX_MOD({HKernel.axes_seq_enum('O', o_shape.ndim)}));\n\n float cx01 = (float) c_value.x;\n float cy01 = (float) c_value.y;\n\n float cx0f = floor(cx01); int cx0 = (int)cx0f;\n float cy0f = floor(cy01); int cy0 = (int)cy0f;\n float cx1f = cx0f+1; int cx1 = (int)cx1f;\n float cy1f = cy0f+1; int cy1 = (int)cy1f;\n\n float p00 = I_GLOBAL_LOAD(I_IDX_MOD({HKernel.axes_seq_enum('O', o_shape.ndim-2, suffix='cy0,cx0')}));\n float p01 = I_GLOBAL_LOAD(I_IDX_MOD({HKernel.axes_seq_enum('O', o_shape.ndim-2, suffix='cy0,cx1')}));\n float p10 = I_GLOBAL_LOAD(I_IDX_MOD({HKernel.axes_seq_enum('O', o_shape.ndim-2, suffix='cy1,cx0')}));\n float p11 = I_GLOBAL_LOAD(I_IDX_MOD({HKernel.axes_seq_enum('O', o_shape.ndim-2, suffix='cy1,cx1')}));\n\n p00 *= (cx1f - cx01)*(cy1f - cy01)*(cy0 >= 0 & cy0 < Im2 & cx0 >= 0 & cx0 < Im1);\n p01 *= (cx01 - cx0f)*(cy1f - cy01)*(cy0 >= 0 & cy0 < Im2 & cx1 >= 0 & cx1 < Im1);\n p10 *= (cx1f - cx01)*(cy01 - cy0f)*(cy1 >= 0 & cy1 < Im2 & cx0 >= 0 & cx0 < Im1);\n p11 *= (cx01 - cx0f)*(cy01 - cy0f)*(cy1 >= 0 & cy1 < Im2 & cx1 >= 0 & cx1 < Im1);\n\n O_GLOBAL_STORE(gid, p00 + p01 + 
p10 + p11);\n}}\n\"\"\")\n","repo_name":"iperov/DeepFaceLive","sub_path":"xlib/avecl/_internal/op/remap.py","file_name":"remap.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","stars":18963,"dataset":"github-code","pt":"72"}
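What the OpenCL kernel above computes is plain bilinear sampling with zero padding: each output pixel reads the four integer neighbors of its float (x, y) coordinate, weights them by distance, and masks taps that fall outside the image. A self-contained numpy sketch of the same arithmetic for a single 2-D image (an illustration of the math, not the library's API):

import numpy as np

def remap_bilinear(img, coords):
    """img: (H, W); coords: (OH, OW, 2) float [x, y]; reads outside the image as zero."""
    H, W = img.shape
    x, y = coords[..., 0], coords[..., 1]
    x0, y0 = np.floor(x).astype(int), np.floor(y).astype(int)
    x1, y1 = x0 + 1, y0 + 1
    out = np.zeros(coords.shape[:2], dtype=np.float64)
    # the four taps and their bilinear weights, mirroring p00/p01/p10/p11 in the kernel
    for yi, xi, w in ((y0, x0, (x1 - x) * (y1 - y)),
                      (y0, x1, (x - x0) * (y1 - y)),
                      (y1, x0, (x1 - x) * (y - y0)),
                      (y1, x1, (x - x0) * (y - y0))):
        valid = (yi >= 0) & (yi < H) & (xi >= 0) & (xi < W)
        out += w * np.where(valid, img[np.clip(yi, 0, H - 1), np.clip(xi, 0, W - 1)], 0.0)
    return out

img = np.arange(16, dtype=np.float64).reshape(4, 4)
xx, yy = np.meshgrid(np.arange(4, dtype=np.float64), np.arange(4, dtype=np.float64))
print(remap_bilinear(img, np.stack([xx + 0.5, yy], axis=-1)))  # shift half a pixel right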
+{"seq_id":"2116714401","text":"#This can be used to list the problems yet unsolved in C or Python\n#It also lists all of the codechef problems solved alongwith number of problems\n#solved in C and Python'''\n\n#File structure is like\n#C\\codechef\\easy\n#C\\codechef\\medium\n#C\\codechef\\hard\n#Here C can be replaced by python or any other in the list 'top'\n\nimport glob\nimport os\n\ncur = os.getcwd()\n#Use Indexing according to i\ntop = ['c', 'python2']\nextensions = ['*.c', '*.py']\n#Use Indexing according to j\ndown = ['easy', 'medium', 'hard']\n\nwrite_file = cur + '\\\\__codechef_status.py'\nw = open(write_file, 'w+')\n\nfor j in range(len(down)):\n all_solved = set()\n\n #Use Indexing according to i\n solved = [0] * len(top)\n\n for i in range(len(top)):\n #The file structure being used is present in a folder\n #git_repo\n #Change the below file structure to go to correct directory\n os.chdir(cur + '\\\\..\\\\..\\\\' +\n top[i] + '\\\\codechef\\\\' + down[j])\n\n temp = extensions[i]\n\n solved[i] = set(\n [cur_file[:len(cur_file) - len(temp) + 1].lower()\n for cur_file in glob.glob(temp)]\n )\n all_solved.update(solved[i])\n\n w.write(\"Total number of codechef \" + down[j])\n w.write(\" problems solved = \" + str(len(all_solved)))\n\n if len(all_solved) > 0:\n w.write('\\n\\n')\n for i in range(len(top)):\n temp = all_solved - solved[i]\n w.write(\"\\nProblems unsolved in \")\n w.write(top[i] + \" = \" + str(len(temp)))\n w.write(\"\\n\\n\")\n for k in temp:\n w.write(\"#\" + str(k) + \"\\n\")\n\n w.write(\"\\n----------------------------------------\\n\")\n\nw.close()\n","repo_name":"anshbansal/general","sub_path":"Python3/Automation/Listing codechef status.py","file_name":"Listing codechef status.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"}
+{"seq_id":"233674228","text":"from webdriver_manager.chrome import ChromeDriverManager\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom time import sleep, time\r\n\r\ndef oncfAuto(processNumber):\r\n start = time()\r\n options = webdriver.ChromeOptions()\r\n # exclude debugging msgs\r\n options.add_experimental_option('excludeSwitches', ['enable-logging'])\r\n options.add_argument('--log-level=3')\r\n driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)\r\n driver.maximize_window() \r\n print(f'####################### Process N° {processNumber} - Launching Chrome #######################')\r\n action = ActionChains(driver)\r\n driver.get('https://www.oncf-voyages.ma/')\r\n print(f'Process N° {processNumber} - Waiting for page to load..')\r\n WebDriverWait(driver, 1000).until(EC.presence_of_element_located((By.ID, 'origin')))\r\n sleep(3)\r\n depart = driver.find_element(By.ID, 'origin')\r\n sleep(1)\r\n depart.click()\r\n sleep(1)\r\n action.send_keys('Casa Port').key_down(Keys.ENTER).key_up(Keys.ENTER).perform()\r\n print(f'Process N° {processNumber} - Ville Depart')\r\n sleep(1)\r\n destination = driver.find_element(By.ID, 'destination')\r\n sleep(1)\r\n destination.click()\r\n sleep(1)\r\n action.key_down(Keys.CONTROL).send_keys('a').key_up(Keys.CONTROL).send_keys('Rabat Agdal').key_down(Keys.ENTER).key_up(Keys.ENTER).perform()\r\n print(f'Process N° {processNumber} - Ville Destination')\r\n sleep(1)\r\n search = driver.find_element(By.CLASS_NAME, 'searchForm_footer ').click()\r\n print(f'Process N° {processNumber} - Loading results..')\r\n WebDriverWait(driver, 1000).until(EC.presence_of_element_located((By.CLASS_NAME, 'ant-btn.btn-default.ant-btn-default.ant-btn-round')))\r\n reserver = driver.find_elements(By.CLASS_NAME,'ant-btn.btn-default.ant-btn-default.ant-btn-round')\r\n reserver[-1].click()\r\n print(f'Process N° {processNumber} - Reserve for last trip..')\r\n WebDriverWait(driver, 1000).until(EC.presence_of_element_located((By.CLASS_NAME, 'ant-btn.btn-default-ghost.tariffsrecomandation_card_cta.card-button-active.Flex.ant-btn-default.ant-btn-round.ant-btn-background-ghost')))\r\n sleep(5)\r\n print(f'Process N° {processNumber} - Select Trip..')\r\n select = driver.find_element(By.CLASS_NAME, 'ant-btn.btn-default-ghost.tariffsrecomandation_card_cta.card-button-active.Flex.ant-btn-default.ant-btn-round.ant-btn-background-ghost').click()\r\n WebDriverWait(driver, 1000).until(EC.presence_of_element_located((By.CLASS_NAME, 'ant-btn.btn-secondary.btn-large.ant-btn-secondary.ant-btn-round')))\r\n sleep(5)\r\n print(f'Process N° {processNumber} - Add to Basket..')\r\n add = driver.find_element(By.CLASS_NAME, 'ant-btn.btn-secondary.btn-large.ant-btn-secondary.ant-btn-round').click()\r\n sleep(3)\r\n # Go to AUTH page \r\n driver.get('https://www.oncf-voyages.ma/resultats-disponibilites/authentification')\r\n WebDriverWait(driver, 1000).until(EC.presence_of_element_located((By.ID, 'SignInFormUsername')))\r\n sleep(3)\r\n print(f'Process N° {processNumber} - Logging..')\r\n email = driver.find_element(By.ID, 'SignInFormUsername').send_keys('ebdeu.slave@gmail.com')\r\n sleep(1)\r\n passwd = driver.find_element(By.ID, 'SignInFormPassword').send_keys('maroc.2022')\r\n sleep(1)\r\n login 
= driver.find_element(By.CLASS_NAME, 'ant-btn.btn-secondary.SignInForm_ctaSignIn.ant-btn-secondary.ant-btn-round').click()\r\n    WebDriverWait(driver, 1000).until(EC.presence_of_element_located((By.CLASS_NAME, 'PassengerComponent_form-label')))\r\n    sleep(2)\r\n    print(f'Process N° {processNumber} - Entering my info..')\r\n    inputs = driver.find_elements(By.CLASS_NAME, 'ant-input.CustomInput ')\r\n    inputs[0].send_keys('Abderrahim')\r\n    sleep(1)\r\n    inputs[1].send_keys('Essaouaf')\r\n    sleep(1)\r\n    accept = driver.find_elements(By.CLASS_NAME, 'ant-checkbox-input')[1].click()\r\n    sleep(1)\r\n    pay = driver.find_element(By.CLASS_NAME, 'ant-btn.btn-default.FormComponent_confirm.ant-btn-default.ant-btn-round').click()\r\n    print(f'Process N° {processNumber} - Going to Payment Step (CMI Page)')\r\n    end = time()\r\n    duration = end - start\r\n    print(f'Process N° {processNumber} - Automation Finished in {round(duration,2)} seconds | {round(duration/60,2)} minutes')\r\n    sleep(30)\r\n    print(f'Process N° {processNumber} - Window Closed')\r\n\r\n\r\n# run the script several times in parallel using the multiprocessing module\r\n# WARNING: choose `times` according to your CPU power; a value too large for a weak processor will make the machine hang\r\nfrom multiprocessing import Process\r\n# change `times` to suit your needs\r\ntimes = 10\r\n# collect the processes in a list\r\nprocesses = [Process(target=oncfAuto, args=(str(i),)) for i in range(1, times + 1)]\r\n\r\nif __name__ == '__main__':\r\n    s = time()\r\n    for p in processes:\r\n        p.start()\r\n\r\n    for p in processes:\r\n        p.join()\r\n    e = time()\r\n    d = e - s\r\n    print(f'processes finished in {round(d,2)} seconds | {round(d/60,2)} minutes')\r\n","repo_name":"ebdeuslave/public_apps","sub_path":"automate_oncf.py","file_name":"automate_oncf.py","file_ext":"py","file_size_in_byte":5289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"10643812648","text":"import cocos.menu\nimport cocos.scene\nimport cocos.layer\nimport cocos.actions as ac\nfrom cocos.director import director\nfrom cocos.scenes.transitions import FadeTRTransition\n\nimport pyglet.app\n\nfrom gamelayer import new_game\n\n\nclass MainMenu(cocos.menu.Menu):\n def __init__(self):\n super(MainMenu, self).__init__('Tower Defense')\n\n self.font_title['font_name'] = 'Oswald' #使用towerdefense.py载入的字体 标题、选项、选中的选项都使用\n self.font_item['font_name'] = 'Oswald'\n self.font_item_selected['font_name'] = 'Oswald' #\n\n self.menu_anchor_y = 'center' #设定置中\n self.menu_anchor_x = 'center'\n\n items = list() #选项用list来储存\n items.append(cocos.menu.MenuItem('New Game', self.on_new_game)) #一般可点击一次的选项,开始新游戏\n items.append(cocos.menu.ToggleMenuItem('Show FPS: ', self.show_fps, director.show_FPS)) #布林选项,传入值至show_fps(),决定是否要显示FPS则在show_fps()内以及第三个参数都要director.show_FPS,缺一不可\n \n items.append(cocos.menu.MenuItem('Quit', pyglet.app.exit)) #离开游戏\n\n self.create_menu(items, ac.ScaleTo(1.25, duration=0.25), ac.ScaleTo(1.0, duration=0.25)) #由于cocos2d 除了精灵以外 其他东西也都是继承是cocosNode 这意味着一般操控精灵的\"动作\"也可以使用于这,ScaleTo用于将目标缩放至倍数大小,第一个设定选中时将该选项放大至1.25倍在0.25秒内,第二个是放开时在0.25秒内回复到1倍\n\n def on_new_game(self): #转场功能,点击New Game在2秒内转场至游戏场景 FadeTRTransition是转场特效\n director.push(FadeTRTransition(new_game(), duration=2))\n\n def show_fps(self, val): #当FPS选项ON时会传入1 OFF时会传入0\n director.show_FPS = val\n\ndef new_menu():\n scene = cocos.scene.Scene()\n color_layer = cocos.layer.ColorLayer(205, 133, 63, 255)\n scene.add(MainMenu(), z=1) #选单\n scene.add(color_layer, z=0) #颜色涂层\n return scene\n","repo_name":"healthyvitamin/python_game","sub_path":"塔防/mainmenu.py","file_name":"mainmenu.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"42652076915","text":"\n\n\"\"\"\nScript: download_stock_price_data.py\n\nThis script will help to download stock price data from Yahoo finance. Once we have the data available, we can store\nit in csv or pickle format.\n\n\nAfter downloading the data from Yahoo, a data-cleaning process is required. Most of the time, the data has two dimensions,\nstock name and stock price attributes such as date, open, low, high, close, adjClose and volume.\n\nIn our design, we want to have data grouped by attribute. For example, if the attribute of interest in closed price,\nwe will have a DataFrame called close. The index is the date, each column will represent a stock.\n\n\nAttention:\n Double check the calculation of the stock return DataFrame. Do not use \"future data\" in the current calculation.\n\n\"\"\"\n\n\n\n\n\nimport pandas as pd\nfrom pandas.io.data import DataReader\nfrom os import path\n\n\n\n# set the start date and end date and the list of stocks\n#\n# start_date = pd.datetime(2009,1,1)\n# end_date = pd.datetime(2013,06,30)\n# stock_list = ['aapl','msft','mmm','ibm','jpm','wmt','yhoo','gps','ge','f']\n#\n#\n# df = DataReader('AAPL', 'yahoo', start_date, end_date)\n#\n# df.to_csv(path.join(r'/Users/Ruikun/workspace/backtesting_platform_local/data', 'test_index.csv'))\n\n_default_list_stock = ['aapl','msft','mmm','ibm','jpm','wmt','yhoo','gps','ge','f']\n\n\ndef download(start_date=None, end_date=None, list_stock=None, output_path=None):\n \"\"\"\n This function will download data from Yahoo Finance.\n\n Args:\n start_date: datetime object indicating the start of the period\n end_date: datetime object indicating the end of the period\n list_of_stock: list of stocks of interest. The stock is identified by symbol.\n output_path: path to store the download files.\n\n Return:\n The function will return the Date index of the download files. This is useful when we need to reindex other\n pd.DataFrame.\n\n \"\"\"\n assert start_date\n assert end_date\n assert output_path\n\n list_stock = list_stock or _default_list_stock\n list_stock = [x.upper() for x in list_stock]\n\n # create data container\n # this is a dictionary. 
The key is the ticker of the stock and the value is the\n # DateFrame that contains the historical information of that stock\n data_container = dict()\n\n for stock in list_stock :\n try:\n data_container[stock] = DataReader(stock, 'yahoo', start_date, end_date)\n except Exception as e:\n print(\"Failed to download {} price data from Yahoo Finance.\".format(stock))\n print(e)\n\n open = pd.DataFrame()\n close = pd.DataFrame()\n low = pd.DataFrame()\n high = pd.DataFrame()\n volume = pd.DataFrame()\n adjClose = pd.DataFrame()\n\n\n for ticker in list_stock:\n open[ticker] = data_container[ticker]['Open']\n close[ticker] = data_container[ticker]['Close']\n low[ticker] = data_container[ticker]['Low']\n high[ticker] = data_container[ticker]['High']\n volume[ticker] = data_container[ticker]['Volume']\n adjClose[ticker] = data_container[ticker]['Adj Close']\n\n\n open.to_csv(path.join(output_path, 'open.csv'))\n close.to_csv(path.join(output_path, 'close.csv'))\n low.to_csv(path.join(output_path, 'low.csv'))\n high.to_csv(path.join(output_path, 'high.csv'))\n volume.to_csv(path.join(output_path, 'volume.csv'))\n adjClose.to_csv(path.join(output_path, 'adjClose.csv'))\n\n\n rtn = adjClose / adjClose.shift(1) - 1.\n rtn.to_csv(path.join(output_path, 'rtn.csv'))\n\n return volume.index\n\n\n\n\n\n\n\n\n\n\n\n# Now create variables that is convenient for data analysis\n\n# The variables are in the form of a matrix(dataframe). Each column\n# represents a stock and the row index is the date.\n\n# Note that the column name of the dataframe should have the same order as the stock_list\n\n\n\n# create price and volume variables\n\n\n#\n#\n# # create return variables\n# nrow , ncol = adjClose.shape\n# stReturn_pre = adjClose.values[1:] / adjClose.values[:-1] - 1.\n# stReturn = pd.DataFrame(np.zeros_like(adjClose),index=adjClose.index, columns=adjClose.columns )\n# stReturn[1:] = stReturn_pre\n# stReturn.ix[0] = np.nan\n#\n#\n#\n#\n# # save dataframe to disk in a binary form\n#\n#\n# print ('saving DataFrame to disk...')\n#\n# open.to_pickle (path=r'../data/open_dataframe')\n# close.to_pickle (path=r'../data/close_dataframe')\n# low.to_pickle (path=r'../data/low_dataframe')\n# high.to_pickle (path=r'../data/high_dataframe')\n# volume.to_pickle (path=r'../data/volume_dataframe')\n# adjClose.to_pickle (path=r'../data/adjClose_dataframe')\n# stReturn.to_pickle (path=r'../data/stReturn_dataframe')\n#\n#\n#\n#\n# print ('Done.')\n#\n#\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"meretciel/backtesting_platform","sub_path":"script/download_stock_price_data.py","file_name":"download_stock_price_data.py","file_ext":"py","file_size_in_byte":4768,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
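A hedged usage example for `download` (the output directory is a placeholder, and the dates mirror the commented-out block above; note that the Yahoo endpoint behind `DataReader` has been unreliable for years, so treat this as illustrative):

import datetime

if __name__ == '__main__':
    idx = download(start_date=datetime.datetime(2009, 1, 1),
                   end_date=datetime.datetime(2013, 6, 30),
                   list_stock=['aapl', 'msft', 'ibm'],
                   output_path='./data')   # placeholder; the directory must exist
    print(idx[:5])                         # the Date index returned for reindexing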
+{"seq_id":"28017460189","text":"import pandas as pd\r\nimport os\r\nfrom urllib.request import urlopen\r\nfrom bs4 import BeautifulSoup\r\nimport nltk\r\nnltk.download('stopwords')\r\nimport requests\r\nimport re\r\n\r\n\r\nstop_words = set()\r\n\r\n# Specify the path to the stop words directory\r\nstop_words_dir = 'stop_words'\r\n\r\n# Iterate over all files in the stop words directory\r\nfor filename in os.listdir(stop_words_dir):\r\n filepath = os.path.join(stop_words_dir, filename)\r\n \r\n # Read the stop words from the file\r\n with open(filepath, 'r') as f:\r\n for line in f:\r\n # Ignore text after '|'\r\n word = line.split('|')[0].strip().lower()\r\n stop_words.add(word)\r\n\r\n# Download the required nltk packages\r\nnltk.download('punkt')\r\n\r\n# Load positive and negative words\r\npositive_words = set(open('positive_words.txt').read().splitlines())\r\nnegative_words = set(open('negative_words.txt').read().splitlines())\r\n\r\n# Load input file\r\ninput_file = pd.read_excel('input.xlsx')\r\n\r\n# Create a new directory for the articles\r\nif not os.path.exists('articles'):\r\n os.makedirs('articles')\r\n\r\n# Create output dataframe\r\noutput = pd.DataFrame(columns=['URL_ID', 'URL', 'Positive Score', 'Negative Score', 'Polarity Score', 'Subjectivity Score', 'Average Sentence Length', 'Average Number of Words Per Sentence', 'Percentage of Complex Words', 'Complex Word Count', 'Fog Index','Word Count','Syllable Per Word','Personal Pronouns','Average Word Length'])\r\n\r\n# Loop through each URL in the input file and extract the article text and title\r\nfor i in range(len(input_file)):\r\n url_id = input_file['URL_ID'][i]\r\n url = input_file['URL'][i]\r\n print(f\"Processing {url}\")\r\n \r\n # Load the webpage and extract the article text\r\n res = requests.get(url)\r\n soup = BeautifulSoup(res.content, 'html.parser')\r\n article_title = ''\r\n if soup.find('h1') is not None:\r\n article_title = soup.find('h1').text.strip()\r\n \r\n article_text = ''\r\n for p in soup.find_all('p'):\r\n article_text += p.text.strip() + '\\n\\n\\n'\r\n\r\n \r\n # Save the article as a text file with the URL_ID as the filename \r\n filename = f'articles/{url_id}.txt'\r\n with open(filename, 'w', encoding='utf-8') as file: #storing text into string format with (context manager)\r\n file.write(f'{article_title}\\n\\n\\n\\n{article_text}')\r\n\r\n # Print the filename for confirmation\r\n print(f'Saved {filename}')\r\n\r\n \r\n # Calculate positive score\r\n words = nltk.word_tokenize(article_text.lower())\r\n\r\n #remove stop word\r\n words = [word for word in words if word not in stop_words]\r\n\r\n # Initialize the Positive Score to 0\r\n pos_score = 0\r\n\r\n # Calculate the Positive Score\r\n for word in words:\r\n if word in positive_words:\r\n pos_score += 1\r\n \r\n # Initialize the Negative Score to 0\r\n neg_score = 0\r\n\r\n # Calculate the Negative Score\r\n for word in words:\r\n if word in negative_words:\r\n neg_score -= 1\r\n\r\n # Multiply the Negative Score by -1 to make it a positive number\r\n neg_score *= -1\r\n \r\n # Calculate polarity score\r\n polarity_score = (pos_score - neg_score) / ((pos_score + neg_score) + 0.000001) \r\n \r\n # Calculate the Total Number of Words\r\n total_words = len(words)\r\n\r\n # Calculate the Subjectivity Score\r\n subjectivity_score = (pos_score + neg_score) / (total_words + 0.000001)\r\n\r\n \r\n # Calculate average sentence length\r\n sentences = nltk.sent_tokenize(article_text)\r\n num_sentences = len(sentences)\r\n num_words = 
len(words)\r\n avg_sentence_length = num_words / num_sentences if num_sentences != 0 else 0\r\n \r\n # Calculate average number of words per sentence\r\n avg_words_per_sentence = num_words / num_sentences if num_sentences != 0 else 0\r\n \r\n # Calculate percentage of complex words\r\n complex_words = [word for word in words if len(word) > 2 and word not in nltk.corpus.stopwords.words('english')] \r\n percent_complex_words = len(complex_words) / len(words) if len(words) != 0 else 0\r\n \r\n\r\n # Calculate average word length\r\n total_word_length = sum(len(word) for word in words)\r\n average_word_length = total_word_length / len(words) if len(words) != 0 else 0\r\n\r\n # Calculate word count after cleaning text\r\n cleaned_words = [word for word in words if word not in nltk.corpus.stopwords.words('english') and word.isalpha()]\r\n word_count = len(cleaned_words)\r\n\r\n\r\n # Calculate Syllable Count Per Word\r\n def count_syllables(word): \r\n vowels = 'aeiouy'\r\n count = 0\r\n if word[0] in vowels:\r\n count += 1\r\n for index in range(1, len(word)):\r\n if word[index] in vowels and word[index - 1] not in vowels: #This is because a vowel sound in a word usually marks the beginning of a new syllable\r\n count += 1\r\n if word.endswith('es') or word.endswith('ed'):\r\n count -= 1\r\n if count == 0:\r\n count = 1\r\n return count\r\n\r\n syllable_count_per_word = [count_syllables(word) for word in cleaned_words]\r\n\r\n\r\n # Calculate complex word count\r\n complex_word_count = len(complex_words)\r\n \r\n # Calculate fog index\r\n fog_index = 0.4 * (avg_sentence_length + percent_complex_words)\r\n\r\n \r\n\r\n # Define the personal pronouns regex pattern \r\n personal_pronouns_pattern = re.compile(r'\\b(i|we|my|ours|us)\\b', flags=re.IGNORECASE)\r\n\r\n # Calculate personal pronoun count\r\n personal_pronoun_count = len(re.findall(personal_pronouns_pattern, article_text))\r\n\r\n\r\n \r\n # Add the results to the output dataframe\r\n output = output.append({'URL_ID': url_id, 'URL': url, 'Positive Score': pos_score, 'Negative Score': neg_score, \r\n 'Polarity Score': polarity_score, 'Subjectivity Score': subjectivity_score, \r\n 'Average Sentence Length': avg_sentence_length, 'Average Number of Words Per Sentence': avg_words_per_sentence,\r\n 'Percentage of Complex Words':percent_complex_words, 'Complex Word Count':complex_word_count, 'Fog Index': fog_index,\r\n 'Word Count':word_count,'Syllable Per Word':syllable_count_per_word,'Personal Pronouns':personal_pronoun_count,'Average Word Length':average_word_length },ignore_index=True)\r\n\r\n# Save the output dataframe to a new excel file\r\noutput.to_excel('output.xlsx', index=False)\r\n","repo_name":"iamravi26/Data_processing-and-NLP","sub_path":"Ravi.py","file_name":"Ravi.py","file_ext":"py","file_size_in_byte":6500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
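The readability block above combines its pieces as a Gunning-style index, fog = 0.4 * (average sentence length + complex-word share); note the script feeds the share in as a 0-1 fraction and counts a word as "complex" when it is longer than 2 characters and not a stopword. A toy run of those same formulas:

words = ['the', 'transformer', 'encodes', 'tokens', 'into', 'vectors']
stop = {'the', 'into'}        # tiny stand-in for nltk's English stopword list
num_sentences = 2

avg_sentence_length = len(words) / num_sentences                    # 3.0
complex_words = [w for w in words if len(w) > 2 and w not in stop]  # 4 words
percent_complex = len(complex_words) / len(words)                   # 0.666...
fog_index = 0.4 * (avg_sentence_length + percent_complex)
print(round(fog_index, 3))                                          # 1.467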
+{"seq_id":"71345602792","text":"from algorithum.a2c import A2C\nimport tensorflow as tf\n\n\nclass PPO(A2C):\n\n def __init__(self, obs_dimension, a_dimension, lr, action_space_length, feature_transform,\n epsilon, model, regular_str, minibatch, epoch, vf_coef, max_grad_norm, worker, is_seperate=False,\n isPysc2=False, isLSTM=False):\n super(PPO, self).__init__(obs_dimension, a_dimension, action_space_length, lr,\n feature_transform, model, regular_str, minibatch, epoch, max_grad_norm, isLSTM,\n isa2c=False, is_seperate=is_seperate)\n self.minibatch = minibatch\n if self.is_seperate:\n self.policy_old_out, self.old_params = model.make_actor_network(input_opr=self.batch['state'],\n name=\"old\",\n batch_size=minibatch,\n train=False)\n\n self.value_old_out, self.old_value_params = model.make_critic_network(input_opr=self.batch['state'],\n name=\"old_value\",\n batch_size=minibatch,\n train=False)\n else:\n self.value_old_out, self.policy_old_out, self.old_params, _, _ = model.make_network(\n input_opr=self.batch['state'],\n name=\"old\",\n batch_size=minibatch,\n train=False)\n\n if self.is_seperate:\n self.sync_network = self.get_sync_old(self.params, self.old_params)\n self.sync_network2 = self.get_sync_old(self.value_params, self.old_value_params)\n else:\n self.sync_network = self.get_sync_old(self.params, self.old_params)\n\n if self.model.is_continuous or self.model.isCat:\n entropy = self.policy_out.entropy()\n c_prob = tf.maximum(self.policy_out.prob(self.batch[\"actions\"]), 1e-8)\n o_prob = tf.maximum(self.policy_old_out.prob(self.batch[\"actions\"]), 1e-8)\n ratio = tf.exp(tf.log(c_prob) - tf.log(o_prob))\n else:\n entropy = tf.reduce_sum(self.policy_out * tf.log(self.policy_out), axis=1, keepdims=True)\n ratio = self.get_discrete_prob(self.policy_out, self.batch[\"actions\"]) / self.get_discrete_prob(\n self.policy_old_out,\n self.batch[\"actions\"])\n\n surr = ratio * self.batch[\"advantage\"]\n ratio_clip_opr = tf.clip_by_value(ratio,\n 1 - epsilon,\n 1 + epsilon)\n\n exp = tf.minimum(surr, ratio_clip_opr * self.batch[\"advantage\"])\n entropy = tf.reduce_mean(entropy) * - self.reg_str\n self.policy_loss_opr = -tf.reduce_mean(exp) + entropy\n\n clipped_value = self.value_old_out + tf.clip_by_value(self.value_out - self.value_old_out, -epsilon, epsilon)\n loss_vf1 = tf.squared_difference(clipped_value, self.batch[\"rewards\"])\n loss_vf2 = tf.squared_difference(self.value_out, self.batch[\"rewards\"])\n self.value_loss_opr = tf.reduce_mean(tf.maximum(loss_vf1, loss_vf2)) * 0.5\n self.total_loss = self.value_loss_opr * vf_coef + self.policy_loss_opr\n\n self.min_policy_loss_opr = self.get_min_clip(self.policy_loss_opr, self.optimizer)\n self.min_value_loss_opr = self.get_min_clip(self.value_loss_opr, self.optimizer)\n self.min_total_loss_opr = self.get_min_clip(self.total_loss, self.optimizer)\n\n if worker is not None:\n opt = tf.train.SyncReplicasOptimizer(self.optimizer, replicas_to_aggregate=worker.nog,\n total_num_replicas=len(worker.worker))\n gradients, variables = zip(*opt.compute_gradients(self.total_loss))\n gradients, _ = tf.clip_by_global_norm(gradients, self.max_grad_norm)\n self.min_total_loss = opt.apply_gradients(zip(gradients, variables), self.global_step)\n self.sync_replicas_hook = opt.make_session_run_hook(worker.wid == 0)\n\n self.init = tf.global_variables_initializer()\n self.saver = tf.train.Saver()\n\n def get_sync_old(self, params, old_params):\n return [old_params.assign(params) for params, old_params in zip(params, old_params)]\n\n def sync_old(self, 
sess):\n if self.is_seperate:\n sess.run(self.sync_network)\n sess.run(self.sync_network2)\n else:\n sess.run(self.sync_network)\n\n def get_discrete_prob(self, policy_out, a):\n return tf.reduce_sum(policy_out * tf.one_hot(a, self.action_space_length[0], dtype=tf.float32),\n axis=1, keep_dims=True)\n\n def learn(self, sess, episode):\n s, s_, a, r, v, g_adv, adv, q, experience_size = self.feature_t.transform(episode)\n feed_dict = {self.s: s,\n self.td_error: g_adv,\n self.a: a,\n self.v: q\n }\n self.sync_old(sess)\n\n if self.isLSTM:\n sess.run(self.iterator.initializer, feed_dict)\n state = sess.run(self.i_state)\n while True:\n try:\n state, _ = sess.run([self.f_state, self.min_total_loss_opr], feed_dict={self.i_state: state})\n episode.loss = 0\n except tf.errors.OutOfRangeError:\n break\n return episode\n else:\n while True:\n try:\n _ = sess.run(self.min_total_loss_opr, feed_dict)\n episode.loss = 0\n except tf.errors.OutOfRangeError:\n break\n return episode\n\n","repo_name":"funggor123/PPO_LSTM_Pysc2","sub_path":"algorithum/ppo.py","file_name":"ppo.py","file_ext":"py","file_size_in_byte":5967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
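The heart of the loss built above is PPO's clipped surrogate: the new/old probability ratio is clipped to [1 - epsilon, 1 + epsilon] and the pessimistic minimum against the advantage is taken, so an update that pushes the policy outside the trust region stops producing gradient. A small numpy illustration of that objective, independent of the TF graph:

import numpy as np

def clipped_surrogate(ratio, advantage, epsilon=0.2):
    unclipped = ratio * advantage
    clipped = np.clip(ratio, 1 - epsilon, 1 + epsilon) * advantage
    return np.minimum(unclipped, clipped)   # pessimistic bound, maximized during training

# once the ratio leaves [1-eps, 1+eps], the objective is capped:
print(clipped_surrogate(1.5, advantage=1.0))  # 1.2, capped at (1 + eps) * A
print(clipped_surrogate(0.5, advantage=1.0))  # 0.5, the minimum keeps the unclipped value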
+{"seq_id":"19232702883","text":"import os\nimport sys\n\nPY2 = sys.version_info[0] == 2\nPY3 = sys.version_info[0] >= 3\n\nif PY2:\n import ConfigParser\nelse:\n import configparser as ConfigParser\n\n\nif not 'COLLECTOR_CONFIG' in os.environ:\n raise Exception(\"COLLECTOR_CONFIG not found in your environment\")\n\n__config_path = os.environ['COLLECTOR_CONFIG']\n\nif not os.path.exists(__config_path):\n raise Exception(\"COLLECTOR_CONFIG:%s not exist\"%(__config_path))\n\ndef get_conf(fullPath):\n config = ConfigParser.ConfigParser()\n config.read(fullPath)\n return config\n\nCAConfig = get_conf(__config_path);\n\nif __name__ == '__main__':\n print(CAConfig.get('Common','LOG_DIR'))\n","repo_name":"alex-fang/pinpoint-c-agent","sub_path":"collector-agent/Common/Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"}
+{"seq_id":"16176779501","text":"import numpy as np\nimport pandas as pd\nimport math\n\nclass LogisticRegression:\n def __init__(self):\n self.train_valid_ratio = 0.7\n self.train_acc_list = list()\n self.train_loss_list = list()\n self.valid_acc_list = list()\n self.valid_loss_list = list()\n #Best weights\n self.best_w = None\n self.best_b = None\n #Best results\n self.best_epoch = None\n self.best_valid_loss = None\n self.best_valid_acc = None\n\n def initialize_params(self, x):\n w = np.random.rand(x.shape[1])\n bias = np.random.rand()\n return w, bias\n\n def train(self, X, y, batch_size, epoch_size, learning_rate, verbose = True):\n w, b = self.initialize_params(X)\n #adagrad params\n eps = 1e-12\n g_b = 0\n g_w = np.ones(X.shape[1])\n\n #other hyperparams\n best_valid_loss = 99999\n patience = 10 #for early stopping\n \n for num_epoch in range(1, epoch_size+1):\n #Shuffle when each epoch begin\n index = np.arange(X.shape[0])\n np.random.shuffle(index)\n X = X[index]\n y = y[index]\n split_point_x = math.floor(X.shape[0] * self.train_valid_ratio)\n split_point_y = math.floor(y.shape[0] * self.train_valid_ratio)\n X_train = X[:split_point_x, :]\n y_train = y[:split_point_y]\n X_valid = X[split_point_x:, :] \n y_valid = y[split_point_y:]\n\n for num_batch in range(int(X_train.shape[0] / batch_size)):\n #print(\"start\")\n x_batch = X_train[num_batch * batch_size:(num_batch + 1) * batch_size]\n y_batch = y_train[num_batch * batch_size:(num_batch + 1) * batch_size]\n\n #implement adagrad\n w_grad, b_grad = self.compute_gradient(x_batch, y_batch, w, b)\n g_w += w_grad ** 2\n g_b += b_grad ** 2\n\n w = w - learning_rate * w_grad / np.sqrt(g_w + eps)\n b = b - learning_rate * b_grad / np.sqrt(g_b + eps)\n \n #compute loss \n y_train_pred = np.round(self.compute_logistic_value(X_train, w, b))\n train_acc = self.compute_accuracy(y_train_pred, y_train)\n train_loss = self.compute_cross_entropy_loss(y_train_pred, y_train) / X_train.shape[0]\n self.train_acc_list.append(train_acc)\n self.train_loss_list.append(train_loss)\n\n y_valid_pred = np.round(self.compute_logistic_value(X_valid, w, b))\n valid_acc = self.compute_accuracy(y_valid_pred, y_valid)\n valid_loss = self.compute_cross_entropy_loss(y_valid_pred, y_valid) / X_valid.shape[0]\n self.valid_acc_list.append(valid_acc)\n self.valid_loss_list.append(valid_loss)\n\n if verbose:\n print(f\"Epoch {num_epoch}, train loss = {round(train_loss, 4)} (Accuracy: {round(train_acc*100, 3)}%), valid loss = {round(valid_loss, 4)} (Accuracy: {round(valid_acc*100, 3)}%)\")\n \n #save best result\n if valid_loss < best_valid_loss:\n self.best_w = w\n self.best_b = b\n self.best_epoch = num_epoch\n best_valid_loss = valid_loss\n best_valid_acc = valid_acc\n self.best_valid_loss = best_valid_loss\n self.best_valid_acc = best_valid_acc\n\n #early stopping\n if valid_loss > best_valid_loss and num_epoch >= self.best_epoch + patience:\n self.stop_epoch = self.best_epoch + patience\n if verbose:\n print(\"Early Stopping!\")\n print(\"=\"*10 + \"validation result\" + \"=\"*10)\n print(f\"Best epoch is {self.best_epoch} with minimum valid loss = {round(best_valid_loss, 4)} (Accuracy: {round(best_valid_acc*100, 3)}%)\")\n return\n\n self.stop_epoch = num_epoch\n if verbose:\n print(\"Finish model tuning\")\n print(\"=\"*10 + \"Model result\" + \"=\"*10)\n print(f\"Best epoch is {self.best_epoch} with minimum valid loss = {round(best_valid_loss, 4)} (Accuracy: {round(best_valid_acc*100, 3)}%)\")\n\n def train_with_full_data(self, X, y, batch_size, epoch_size, 
learning_rate, verbose = False):\n w, b = self.initialize_params(X)\n #adagrad params\n eps = 1e-12\n g_b = 0\n g_w = np.ones(X.shape[1])\n \n for num_epoch in range(1, epoch_size+1):\n #Shuffle when each epoch begin\n index = np.arange(X.shape[0])\n np.random.shuffle(index)\n X = X[index]\n y = y[index]\n\n for num_batch in range(int(X.shape[0] / batch_size)):\n x_batch = X[num_batch * batch_size:(num_batch + 1) * batch_size]\n y_batch = y[num_batch * batch_size:(num_batch + 1) * batch_size]\n\n #implement adagrad\n w_grad, b_grad = self.compute_gradient(x_batch, y_batch, w, b)\n g_w += w_grad ** 2\n g_b += b_grad ** 2\n\n w = w - learning_rate * w_grad / np.sqrt(g_w + eps)\n b = b - learning_rate * b_grad / np.sqrt(g_b + eps)\n \n #compute loss \n y_pred = np.round(self.compute_logistic_value(X, w, b))\n train_acc = self.compute_accuracy(y_pred, y)\n train_loss = self.compute_cross_entropy_loss(y_pred, y) / X.shape[0]\n\n if verbose:\n print(f\"Epoch {num_epoch}, train loss = {round(train_loss, 4)} (Accuracy: {round(train_acc*100, 3)}%)\")\n\n self.best_w = w\n self.best_b = b\n return\n\n def predict(self, X_test):\n y_pred = self.compute_logistic_value(X_test, self.best_w, self.best_b)\n y_pred = np.round(y_pred)\n return y_pred\n\n def compute_gradient(self, X, y_true, w, b):\n #print(w.shape)\n y_pred = self.compute_logistic_value(X, w, b).flatten() #dim = (batch_size, )\n pred_error = y_true - y_pred\n w_grad = -np.dot(X.T, pred_error) #dim = (feature_size, )\n b_grad = -pred_error.sum(axis = 0)\n return w_grad, b_grad\n\n def compute_logistic_value(self, X, w, b):\n return self.sigmoid(np.matmul(X, w) + b)\n\n def compute_cross_entropy_loss(self, y_pred, y_true):\n eps = 1e-12\n y_pred = np.clip(y_pred, eps, 1-eps)\n cross_entropy = -np.dot(y_true, np.log(y_pred )) - np.dot((1-y_true), np.log(1 - y_pred))\n return cross_entropy\n\n def compute_accuracy(self, y_pred, y_true):\n accuracy = 1 - np.mean(np.abs(y_pred - y_true))\n return accuracy\n\n def sigmoid(self, z):\n res = 1 / (1.0 + np.exp(-z))\n return np.clip(res, 1e-6, 1 - (1e-6))","repo_name":"shengyenlin/Machine-Learning-EEML-2021-Fall","sub_path":"hw2/handcraft_logistic_regression.py","file_name":"handcraft_logistic_regression.py","file_ext":"py","file_size_in_byte":6809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
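One detail worth flagging in the class above: `compute_cross_entropy_loss` is always called on `np.round(...)`-ed outputs, so the logged loss is evaluated on hard 0/1 predictions rather than the sigmoid probabilities. The textbook binary cross-entropy is computed on the probabilities themselves; a minimal self-contained version for comparison:

import numpy as np

def binary_cross_entropy(p, y, eps=1e-12):
    """Mean cross-entropy on predicted probabilities p, labels y in {0, 1}."""
    p = np.clip(p, eps, 1 - eps)
    return -np.mean(y * np.log(p) + (1 - y) * np.log(1 - p))

p = np.array([0.9, 0.2, 0.6])
y = np.array([1.0, 0.0, 1.0])
print(binary_cross_entropy(p, y))            # ~0.280, a smooth training signal
print(binary_cross_entropy(np.round(p), y))  # ~0 here; one wrong hard prediction would jump to -log(eps) ~ 27.6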
+{"seq_id":"39441922472","text":"\"\"\"\n100. Same Tree\n\"\"\"\n\nfrom typing import Optional\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def isSameTree(self, p: Optional[TreeNode], q: Optional[TreeNode]) -> bool:\n if not p and not q:\n return True\n elif p and q and p.val == q.val:\n return self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)\n else:\n return False\n\n\np = TreeNode(3)\np.left = TreeNode(9)\np.right = TreeNode(20)\np.right.left = TreeNode(15)\np.right.right = TreeNode(7)\n\nq = TreeNode(3)\nq.left = TreeNode(9)\nq.right = TreeNode(20)\nq.right.left = TreeNode(15)\nq.right.right = TreeNode(7)\n\nprint(f\"isSameTree: {Solution().isSameTree(p, q)}\")\n","repo_name":"hrishikeshtak/Coding_Practises_Solutions","sub_path":"leetcode/LeetCode-150/Trees/100-Same-Tree.py","file_name":"100-Same-Tree.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"18966040675","text":"from __future__ import print_function\nfrom zprocess import Process, ProcessTree\nimport os\n\nclass Foo(Process):\n def run(self, data):\n print('this is a running foo in process', os.getpid())\n print('data is', data)\n message = self.from_parent.get()\n print('foo, got a message:', message)\n self.to_parent.put('hello yourself!')\n\n# This __main__ check is important to stop the same code executing again in the child:\nif __name__ == '__main__':\n\n process_tree = ProcessTree()\n foo = Foo(process_tree)\n to_child, from_child = foo.start('bar')\n to_child.put('hello, foo!')\n response = from_child.get()\n print('parent, got a response:', response)\n","repo_name":"chrisjbillington/zprocess","sub_path":"zprocess/examples/processclass_example.py","file_name":"processclass_example.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"23213779238","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef toyExample(imageSize, numImages):\n \n imageSize = np.array(imageSize)\n\n ambientImage = np.zeros(imageSize)\n imArray = np.zeros((imageSize[0], imageSize[1], numImages))\n trueAlbedo = np.zeros((imageSize[0], imageSize[1]))\n\n #Generate the scene\n r = np.floor(np.min(imageSize)/2)-1\n ctr = np.ceil(imageSize/2)\n cy = int(ctr[0])\n cx = int(ctr[1])\n\n #Lay down a meshgrid to compute the x and y coordinates\n xx, yy = np.meshgrid(np.arange(imageSize[1]), np.arange(imageSize[0]))\n dd = r*r - (xx-cx)**2 - (yy-cy)**2\n bg = dd <= 0\n dd[bg] = 0\n trueHeightMap = np.sqrt(dd)\n\n #Normals for the foregroud are based on the point on the hemisphere\n distance = np.sqrt((xx-cx)**2 + (yy-cy)**2 + trueHeightMap**2)\n nx = -(xx-cx)/distance\n ny = -(yy-cy)/distance\n nz = trueHeightMap/distance\n\n #Normals for the background are [0 0 1]\n nx[bg] = 0\n ny[bg] = 0\n nz[bg] = 1\n\n trueSurfaceNormals = np.concatenate((nx[:, :, np.newaxis],\n ny[:, :, np.newaxis], nz[:, :, np.newaxis]), axis=2)\n\n #Albedo (checkered pattern)\n trueAlbedo[0:cy, 0:cx] = 1\n trueAlbedo[0:cy, cx:] = 0.3\n trueAlbedo[cy:, 0:cx] = 0.3\n trueAlbedo[cy:, cx:] = 1\n trueAlbedo[bg] = 0.5\n\n #Generate random samples of light directions and images\n lightDirs = np.random.randn(numImages, 3)\n lightDirs[:, 2] = np.abs(lightDirs[:, 2])\n l2norm = np.sqrt(np.sum(lightDirs**2, axis=1))\n lightDirs = lightDirs/l2norm[:, np.newaxis]\n normalArray = trueSurfaceNormals.reshape((imageSize[0]*imageSize[1], 3))\n for i in range(numImages):\n img = trueAlbedo.flatten() * normalArray.dot(lightDirs[i, :])\n imArray[:, :, i] = img.reshape(imageSize)\n\n imArray = np.maximum(imArray, 0)\n\n return (ambientImage, imArray, lightDirs, trueAlbedo, trueSurfaceNormals, trueHeightMap)\n\n\nif __name__ == '__main__':\n ambient, imarray, lightdirs, truealbedo, normals, height = toyExample((128, 128), 10)\n","repo_name":"souwang324/Photometric-Stereo","sub_path":"toyExample.py","file_name":"toyExample.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
+{"seq_id":"25571077347","text":"from map_util import *\nfrom util import *\nfrom log import *\nimport random\n\ndef door_action_start(key_sym, world): #передавать stateSys? объект у кот вызывать? передавать функ?\n if key_sym == 'o':\n log_msg('Открыть дверь в какой стороне?', world)\n else:\n log_msg('Закрыть дверь в какой стороне?', world)\n world.stateSystem.changeState('open_door')\n\ndef open_door(door):\n if door.opened:\n door.opened = False\n door.passable= False\n door.char = door.close_char\n return OPEN_CLOSED\n else:\n door.opened = True\n door.passable= True\n door.char = door.open_char\n return OPEN_OPEND\n\ndef try_open_door(world, x, y, actor, objects): #describe status\n x += actor.x\n y += actor.y\n obj = object_at(Point(x, y), objects)\n if obj:\n if obj.can_open:\n if obj.need_key:\n return OPEN_NEED_KEY, obj\n else:\n status = open_door(obj)\n tire(world, actor.arms)\n return status, obj\n else:\n return OPEN_CANNOT, obj\n else:\n return OPEN_NODOOR, obj\n\ndef go_down(_, world): # TODO все движущиеся должны менять и на карте положение\n actor = world.player\n if can_be_there(actor.x, actor.y + 1, world):\n actor.y += 1 # move_to(x, y, obj, objects)\n tire(world, world.player.legs)\n\ndef go_up(_, world): \n actor = world.player\n if can_be_there(actor.x, actor.y - 1, world):\n actor.y -= 1\n tire(world, world.player.legs)\n\ndef go_left(_, world):\n actor = world.player\n if can_be_there(actor.x - 1, actor.y, world):\n actor.x -= 1\n tire(world, world.player.legs)\n\ndef go_right(_, world): \n actor = world.player\n if can_be_there(actor.x + 1, actor.y, world):\n actor.x += 1\n tire(world, world.player.legs)\n\ndef do_search(_, world): \n objs = objects_in_view(world.player, world)\n found = ''\n for obj in objs:\n if obj.contain:\n if obj.need_key:\n found += obj.name + ' заперт'\n else:\n for obj_in_container in obj.contain:\n found += (obj.name + ' содержит ' + obj_in_container.name)\n elif obj.info_msg:\n log_main(obj.search_msg, lblue)\n if obj.search_msg:\n log_main(obj.search_msg, lblue)\n if not found:\n log_msg('Ничего необычного', world)\n else:\n log_msg(found, world)\n\ndef do_take(_, world):\n log_msg('Взять откуда?', world)\n direction_do(world, take_from)\n\ndef take_from(x, y, world, _):\n obj = object_at_xy(x, y, world.objects)\n if obj:\n if obj.takeable:\n log_msg('Беру ' + obj.name, world)\n log_main('Вы берёте ' + obj.name, lblue)\n inventory_add(obj, world.inventory)\n remove_obj(obj, world.objects)\n tire(world, world.player.arms)\n else:\n if obj.contain: # пока не содержат более одного объекта\n if obj.need_key:\n log_main(obj.name + ' заперт', lred)\n return\n contaiment = obj.contain[0]\n log_msg('Беру из {} {}'.format(obj.name, contaiment.name), world)\n log_main('Вы берёте {} из {}'.format(contaiment.name, obj.name))\n obj.contain = False\n inventory_add(contaiment, world.inventory)\n tire(world, world.player.arms)\n else:\n log_msg('Это нельзя брать.', world)\n else:\n log_msg('Здесь нечего брать.', world)\n\ndef inventory_add(obj, inventory):\n inventory.append(obj)\n\ndef direction_do(world, fun, *args):\n world.direction_action = fun\n world.direction_args = args\n world.stateSystem.changeState('direction')\n\ndef direction_keypress(key_sym, world):\n direction = get_direction(key_sym)\n if direction:\n x, y = direction\n x = world.player.x + x\n y = world.player.y + y\n world.direction_action(x, y, world, world.direction_args)\n world.stateSystem.changeState('walk')\n else:\n log_msg('Неправильное направление. 
', world)\n\ndef get_direction(key_sym):\n keyboard_fun = {\n 'j':lambda _: (0, 1),\n 'k':lambda _: (0, -1),\n 'h':lambda _: (-1, 0),\n 'l':lambda _: (1, 0),\n '.':lambda _: (0, 0),\n }\n fun = keyboard_fun.get(key_sym, False)\n if fun:\n return fun(0)\n else:\n return False\n\ndef inventory_wear_action(world, obj):\n world.stateSystem.changeState('walk')\n if not obj.wearable:\n log_main('Нельзя надеть {}'.format(obj.name))\n return False\n log_main('Надеваю {}'.format(obj.name))\n obj.weared = True\n parts = {\n 'body':world.player.body,\n 'legs':world.player.legs,\n }\n part = parts.get(obj.part, False)\n if part:\n part.weared = obj\n\nINVENTORY_VIEW_ITEM = 'v'\nINVENTORY_APPLY_ITEM = 'a'\nINVENTORY_EAT_ITEM = 'e'\nINVENTORY_WEAR_ITEM = 'W'\ndef do_inventory_action(world, action, object_index):\n inventory_actions = {\n INVENTORY_VIEW_ITEM: inventory_view_action,\n INVENTORY_APPLY_ITEM: lambda w, x: direction_do(w, inventory_apply_action, x),\n INVENTORY_EAT_ITEM: inventory_eat_action,\n INVENTORY_WEAR_ITEM: inventory_wear_action,\n }\n if object_index < len(world.inventory):\n fun = inventory_actions.get(action, False)\n if fun:\n fun(world, world.inventory[object_index])\n \ndef inventory_eat_action(world, obj):\n world.stateSystem.changeState('walk')\n if obj.contain: #water max=2\n log_main('Вы пьёте {}, оставляя {}'.format(obj.name, obj.reminder.name))\n water_get(world.player, obj.contain[0].volume/2 * 100)\n inventory_add(obj.reminder, world.inventory)\n remove_obj_from_inventory(world, obj)\n elif obj.eatable:\n log_main('Вы едите {}, оставляя {}'.format(obj.name, obj.reminder.name))\n tire(world, world.player.body, obj.digestion_energy)\n add_energy(world.player, obj.sugar)\n inventory_add(obj.reminder, world.inventory)\n #TODO lipids to stock_energy and complex_carbons and proteins\n remove_obj_from_inventory(world, obj)\n else:\n log_main('{} не съедобно'.format(obj.name))\n\ndef remove_obj_from_inventory(world, obj):\n world.inventory.remove(obj)\n\ndef inventory_apply_action(x, y, world, applicator):\n pacient = object_at_xy(x, y, world.objects)\n if pacient:\n applicator = applicator[0]\n log_main(\"Пытатетесь применить {} к {}...\".format(applicator.name, pacient.name))\n result = object_apply(applicator, pacient, world)\n if not result:\n log_main(\"не получилось\", lred)\n else:\n log_msg('Не к чему применять', world)\n\ndef object_apply(applicator, pacient, world):\n objects_apply_table = { # to world?\n 4001: {\n 4000: try_key_door,\n },\n 5002:{\n 4002: extract_with_hammer,\n }\n }\n table2 = objects_apply_table.get(applicator.id, False)\n if not table2:\n return False\n fun = table2.get(pacient.id, False)\n if fun:\n return fun(applicator, pacient, world)\n else:\n return False\n\ndef extract_with_hammer(hammer, wall, world):\n if wall.contain:\n log_main('Извлекаете с помощью {} из {} {}'.format(hammer.name, wall.name, wall.contain[0].name), lgreen)\n inventory_add(wall.contain[0], world.inventory)\n wall.contain = False\n tire(world, world.player.arms, 0.5)\n wall.walk_msg = wall.search_msg = wall.empty_msg\n else:\n log_main('Ничего нет')\n return True\n\n\ndef try_key_door(key, door, world):\n log_main('Пытатетесь открыть '+ door.name +' ключом...')\n if door.need_key:\n tire(world, world.player.arms)\n if key.key_id == door.key_id:\n log_main('Ключ подошёл, отпирате.', lgreen)\n door.key_used = True\n door.need_key = False\n else:\n log_main('Ключ не подходит', lred)\n else:\n log_main(door.name + ' не запертo')\n return True\n\ndef 
inventory_view_action(world, obj):\n world.messages.object_info = list()\n world.messages.object_info.append(obj.name)\n if obj.info_msg:\n world.messages.object_info.append(obj.info_msg)\n world.stateSystem.changeState('inventory_view_object')\n\ndef go_inventory(key_sym, world):\n world.stateSystem.changeState('inventory')\n world.inventory_action = key_sym\n\ndef do_smash(_, world):\n log_msg('Сломать где?', world)\n direction_do(world, smash_at)\n\ndef smash_at(x, y, world, _):\n obj = object_at_xy(x, y, world.objects)\n if obj:\n if obj.smashable:\n log_main('Вы пытаетесь сломать ' + obj.name)\n #TODO усталость? или статус и потом обработка или сообщение\n tire(world, world.player.legs, 0.5)\n if obj.need_strength_type == 'LEG':\n strength = world.player.legs.strength\n actual_probability = calc_smash_probablity(strength, obj.need_strength, obj.smash_probability)\n if take_chance(actual_probability):\n log_main('Получилось сломать', lgreen)\n obj.can_open = True\n obj.smashable = False\n obj.walk_msg = ''\n obj.search_msg = obj.smashed_msg\n else:\n log_main('Неполучилось сломать', lred)\n else:\n log_main('Это нельзя cломать')\n else:\n log_main('Здесь нечего ломать')\n\ndef calc_smash_probablity(strength, need, probablity):\n p = strength/need * probablity\n return p if p <= 1.0 else 1.0\n\ndef take_chance(probablity):\n dice = random.random()\n return dice < probablity\n\ndef tick(world):\n world.tick += 1\n if world.tick_events.contain(str(world.tick)):\n event = world.tick_events.get(str(world.tick))\n if event.type == 'MSG':\n log_main(event.msg, blue)\n print(world.tick)\n if not world.player.live:\n print('you not live, bye')\n exit()\n energy_exchange(world.player)\n water_loss(world)\n\ndef add_energy(player, val):\n player.available_energy += val\n if player.available_energy > player.max_available:\n player.stock_energy += player.available_energy - player.max_available\n player.available_energy = player.max_available\n\ndef energy_exchange(player):\n spend_energy(player, 0.01)\n if player.available_energy < player.max_available/3:\n energy_flow(player, 1)\n #TODO\n pass \n\ndef energy_flow(player, val):\n if not dec_stock_energy(player, val):\n player.available_energy += val\n if player.available_energy > player.max_available:\n player.available_energy = player.max_available\n\ndef dec_stock_energy(player, val):\n player.stock_energy -= val\n if player.stock_energy <= 0:\n player.stock_energy = 0\n return True\n return False\n\ndef dec_available_energy(player, val):\n player.available_energy -= val\n if player.available_energy < 0.0 and player.stock_energy > 0.0:\n player.stock_energy -= 0.1\n player.available_energy += 0.1\n\ndef spend_energy(player, val):\n if player.available_energy > 0.0:\n dec_available_energy(player, val)\n #tick()\n if not (player.available_energy > 0.0 or player.stock_energy > 0.0):\n player.live = False\n log_main('Ваша энегрия иссякла, вы больше не можете функционировать.', red)\n\ndef rest(n, world):\n for i in range(n):\n if world.player.available_energy > 0.0:\n spend_energy(world.player, 0.1)\n tick(world)\n restore = world.player.legs.max_stamina/100.0\n rest_part(world.player.legs, restore)\n restore = world.player.arms.max_stamina/100.0\n rest_part(world.player.arms, restore)\n restore = world.player.body.max_stamina/100.0\n rest_part(world.player.body, restore)\n\ndef rest_part(part, val):\n part.stamina += val\n if part.stamina > part.max_stamina:\n part.stamina = part.max_stamina\n\ndef calc_water_loss(world):\n player_temp = 
calc_avg_temp(world.player)\n env_temp = world.rooms.current.temp\n #TODO\n loss = (env_temp**2)/4000\n return loss if loss > 0.1 else 0.1\n\ndef water_get(player, val):\n player.water_level += val\n if player.water_level > 100.0:\n player.water_level = 100\n log_main('Выпили слишком много, всё не осилили, зря потратили')\n\ndef water_loss(world):\n world.player.water_level -= calc_water_loss(world)\n if world.player.water_level < 0:\n world.player.water_level = 0\n\ndef calc_weak_coeff(world):\n coef = 1.0\n if world.player.water_level < 30:\n coef = 40/(world.player.water_level + 10)\n return coef\n\ndef calc_avg_temp(player):\n return (player.body.temp + player.legs.temp + player.arms.temp + player.head.temp) / 4\n\ndef tire(world, part, amount=0.1):\n amount *= calc_weak_coeff(world)\n part.stamina -= amount\n if part.stamina <= 0.01:\n amount = part.stamina + amount\n part.stamina = 0\n log_main('Ваши {} полностью устали, вы упали без сил'.format(part.name), red)\n rest(10, world)\n log_main('Вы немного отдохнули', lred)\n sub_strength_part(part, amount/10.0)\n train_stamina(part, amount)\n train_strength(part, amount)\n\ndef tired(part):\n return part.stamina == 0\n\ndef train_stamina(part, amount):\n part.max_stamina += amount/10.0\n\ndef train_strength(part, amount):\n part.max_strength += amount/20.0\n\ndef do_warmup(_, world):\n log_main('Вы делаете зарядку.', lgreen)\n actor = world.player\n if warm_up_all(world, actor):\n log_main('Вы чувствуете себя сильнее.', green)\n else:\n log_main('Никакого эффекта, только устали.', lred)\n \ndef warm_up_all(world, actor):\n b = warm_up_part(world, actor.body)\n l = warm_up_part(world, actor.legs)\n a = warm_up_part(world, actor.arms)\n return any([b,l,a])\n\ndef warm_up_part(world, part):\n if part.stamina == part.max_stamina:\n tire(world, part)\n return add_strength_part(part)\n tire(world, part)\n return False\n\ndef sub_strength_part(part, amount):\n part.strength -= amount\n if part.strength < 0:\n part.strength = 0\n\ndef add_strength_part(part):\n if part.strength == part.max_strength:\n return False\n part.strength += 1\n if part.strength > part.max_strength:\n part.strength = part.max_strength\n return True\n","repo_name":"anokata/pythonPetProjects","sub_path":"quest/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":15411,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"34538700758","text":"import collections as co\nimport functools as ft\n\nimport sympy\nimport torch\nfrom sympy import (Dummy, I, S, cos, exp, factorial, latex, pi, sin, sqrt,\n symbols, sympify, var)\n\n#---------------- Torch device\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n#------------------\n\n\ndef _reduce(fn):\n def fn_(*args):\n return ft.reduce(fn, args)\n\n return fn_\n\n\n_global_func_lookup = {\n sympy.Mul: _reduce(torch.mul),\n sympy.Add: _reduce(torch.add),\n sympy.div: torch.div,\n sympy.Abs: torch.abs,\n sympy.sign: torch.sign,\n # Note: May raise error for ints.\n sympy.ceiling: torch.ceil,\n sympy.floor: torch.floor,\n sympy.log: torch.log,\n sympy.exp: torch.exp,\n sympy.sqrt: torch.sqrt,\n sympy.cos: torch.cos,\n sympy.acos: torch.acos,\n sympy.sin: torch.sin,\n sympy.asin: torch.asin,\n sympy.tan: torch.tan,\n sympy.atan: torch.atan,\n sympy.atan2: torch.atan2,\n # Note: May give NaN for complex results.\n sympy.cosh: torch.cosh,\n sympy.acosh: torch.acosh,\n sympy.sinh: torch.sinh,\n sympy.asinh: torch.asinh,\n sympy.tanh: torch.tanh,\n sympy.atanh: torch.atanh,\n sympy.Pow: torch.pow,\n sympy.re: torch.real,\n sympy.im: torch.imag,\n sympy.arg: torch.angle,\n # Note: May raise error for ints and complexes\n sympy.erf: torch.erf,\n sympy.loggamma: torch.lgamma,\n sympy.Eq: torch.eq,\n sympy.Ne: torch.ne,\n sympy.StrictGreaterThan: torch.gt,\n sympy.StrictLessThan: torch.lt,\n sympy.LessThan: torch.le,\n sympy.GreaterThan: torch.ge,\n sympy.And: torch.logical_and,\n sympy.Or: torch.logical_or,\n sympy.Not: torch.logical_not,\n sympy.Max: torch.max,\n sympy.Min: torch.min,\n # Matrices\n sympy.MatAdd: torch.add,\n sympy.HadamardProduct: torch.mul,\n sympy.Trace: torch.trace,\n # Note: May raise error for integer matrices.\n sympy.Determinant: torch.det,\n sympy.core.numbers.ImaginaryUnit: lambda *args: torch.complex(\n torch.Tensor([0]), torch.Tensor([1])\n ).to(device),\n}\n\n\nclass _Node(torch.nn.Module):\n def __init__(self, *, expr, _memodict, _func_lookup, **kwargs):\n super().__init__(**kwargs)\n\n self._sympy_func = expr.func\n # print(expr, expr.func, expr.args)\n try:\n expr = sympy.Float(expr)\n # print(expr, expr.func, expr.args, sympy.Float(expr))\n except:\n pass\n if issubclass(expr.func, sympy.Float):\n self._value = torch.nn.Parameter(torch.tensor(float(expr)))\n self._torch_func = lambda: self._value\n self._args = ()\n elif issubclass(expr.func, sympy.UnevaluatedExpr):\n if len(expr.args) != 1 or not issubclass(expr.args[0].func, sympy.Float):\n raise ValueError(\"UnevaluatedExpr should only be used to wrap floats.\")\n self.register_buffer(\"_value\", torch.tensor(float(expr.args[0])))\n self._torch_func = lambda: self._value\n self._args = ()\n elif issubclass(expr.func, sympy.Integer):\n # Can get here if expr is one of the Integer special cases,\n # e.g. 
NegativeOne\n self._value = int(expr)\n self._torch_func = lambda: self._value\n self._args = ()\n elif issubclass(expr.func, sympy.Symbol):\n self._name = expr.name\n self._torch_func = lambda value: value\n self._args = ((lambda memodict: memodict[expr.name]),)\n else:\n self._torch_func = _func_lookup[expr.func]\n args = []\n for arg in expr.args:\n try:\n arg_ = _memodict[arg]\n except KeyError:\n arg_ = type(self)(\n expr=arg,\n _memodict=_memodict,\n _func_lookup=_func_lookup,\n **kwargs\n )\n _memodict[arg] = arg_\n args.append(arg_)\n self._args = torch.nn.ModuleList(args)\n\n def sympy(self, _memodict):\n if issubclass(self._sympy_func, sympy.Float):\n return self._sympy_func(self._value.item())\n elif issubclass(self._sympy_func, sympy.UnevaluatedExpr):\n return self._sympy_func(self._value.item())\n elif issubclass(self._sympy_func, sympy.Integer):\n return self._sympy_func(self._value)\n elif issubclass(self._sympy_func, sympy.Symbol):\n return self._sympy_func(self._name)\n else:\n args = []\n for arg in self._args:\n try:\n arg_ = _memodict[arg]\n except KeyError:\n arg_ = arg.sympy(_memodict)\n _memodict[arg] = arg_\n args.append(arg_)\n return self._sympy_func(*args)\n\n def forward(self, memodict):\n args = []\n for arg in self._args:\n try:\n arg_ = memodict[arg]\n except KeyError:\n arg_ = arg(memodict)\n memodict[arg] = arg_\n args.append(arg_)\n return self._torch_func(*args)\n\n\nclass SymPyModule(torch.nn.Module):\n def __init__(self, *, expressions, extra_funcs=None, **kwargs):\n super().__init__(**kwargs)\n\n if extra_funcs is None:\n extra_funcs = {}\n _func_lookup = co.ChainMap(_global_func_lookup, extra_funcs)\n\n _memodict = {}\n self._nodes = torch.nn.ModuleList(\n [\n _Node(expr=expr, _memodict=_memodict, _func_lookup=_func_lookup)\n for expr in expressions\n ]\n )\n\n def sympy(self):\n _memodict = {}\n return [node.sympy(_memodict) for node in self._nodes]\n\n def forward(self, **symbols):\n return torch.stack([node(symbols) for node in self._nodes], dim=-1)\n\n\nclass SymModule(torch.nn.Module):\n def __init__(self, sym):\n super().__init__()\n self.sym = SymPyModule(expressions=[sym])\n\n def forward(self, phi, theta):\n res = self.sym(theta=theta, phi=phi)\n if \"complex\" not in str(res.dtype):\n return res, torch.zeros_like(res).to(device)\n else:\n return res.real, res.imag\n","repo_name":"ptigas/geoeffectivenet","sub_path":"utils/sympy.py","file_name":"sympy.py","file_ext":"py","file_size_in_byte":6266,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
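A small usage sketch for the module above — assuming `SymPyModule` is imported from it; the expression and symbol are illustrative:

```python
import sympy
import torch

# Compile a sympy expression into a torch module; its Float constants
# (2.5 and 1.0 here) become trainable torch.nn.Parameters.
x = sympy.symbols("x")
expr = 2.5 * sympy.sin(x) + 1.0

mod = SymPyModule(expressions=[expr])  # class defined in the module above
out = mod(x=torch.linspace(0.0, 3.14, 5))
print(out.shape)               # torch.Size([5, 1]): one column per expression
print(mod.sympy())             # round-trips back to sympy expressions
print(list(mod.parameters()))  # the two constants, now trainable
```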
+{"seq_id":"33049110656","text":"## =========================================================================\n## @author Leonardo Florez-Valencia (florez-l@javeriana.edu.co)\n## =========================================================================\n\nimport math, random, sys\n\n## -------------------------------------------------------------------------\ndef ReadPGM( filename ):\n stream = open( filename )\n assert stream.readline( ) == 'P2\\n'\n\n ( width, height ) = [ int( i ) for i in stream.readline( ).split( ) ]\n depth = int( stream.readline( ) )\n assert depth <= 255\n\n pgm_buffer = []\n for y in range( height ):\n row = []\n for x in range( width ):\n row.append( int( stream.readline( ) ) )\n # end for\n pgm_buffer.append( row )\n # end for\n stream.close( )\n return pgm_buffer\n# end def\n\n## -------------------------------------------------------------------------\ndef SavePGM( filename, pgm_buffer ):\n ( width, height ) = [ len( pgm_buffer ), len( pgm_buffer[ 0 ] ) ]\n stream = open( filename, 'w' )\n stream.write( 'P2\\n' + str( width ) + ' ' + str( height ) + '\\n255\\n' )\n\n for x in range( width ):\n for y in range( height ):\n stream.write( str( pgm_buffer[ x ][ y ] ) + '\\n' )\n # end for\n # end for\n stream.close( )\n# end def\n\n## -------------------------------------------------------------------------\ndef Sigmoid( z ):\n if -10 <= z and z <= 10:\n return 1.0 / ( 1.0 + math.exp( -float( z ) ) )\n elif z < -10:\n return 0.0\n else:\n return 1.0\n # end if\n# end def\n\n## -------------------------------------------------------------------------\ndef CostSigmoid( W, b, X, Y ):\n m = len( X )\n n = len( X[ 0 ] )\n J = 0.0\n for i in range( m ):\n z = float( b )\n for j in range( n ):\n z += float( W[ j ] ) * float( X[ i ][ j ] )\n # end for\n h = Sigmoid( z )\n if Y[ i ] == 1:\n J -= math.log( h + 1e-8 )\n else:\n J -= math.log( 1.0 - h + 1e-8 )\n # end if\n # end for\n return J / float( m )\n# end def\n\n## -------------------------------------------------------------------------\ndef WDerivativeSigmoid( W, b, X, Y ):\n m = len( X )\n n = len( X[ 0 ] )\n dw = [ 0.0 for i in range( len( X[ 0 ] ) ) ]\n\n for i in range( m ):\n z = float( b )\n for j in range( n ):\n z += float( W[ j ] ) * float( X[ i ][ j ] )\n # end for\n h = Sigmoid( z )\n\n for j in range( n ):\n dw[ j ] -= float( X[ i ][ j ] ) * ( float( Y[ i ] ) - h )\n # end for\n # end for\n\n for j in range( n ):\n dw[ j ] /= float( m )\n # end for\n return dw\n# end def\n\n## -------------------------------------------------------------------------\ndef BDerivativeSigmoid( W, b, X, Y ):\n m = len( X )\n n = len( X[ 0 ] )\n db = 0.0\n for i in range( m ):\n z = float( b )\n for j in range( n ):\n z += float( W[ j ] ) * float( X[ i ][ j ] )\n # end for\n h = Sigmoid( z )\n db -= float( Y[ i ] ) - h\n # end for\n return db / float( m )\n# end def\n\n## -------------------------------------------------------------------------\ndef Tanh( z ):\n return math.tanh( float( z ) )\n# end def\n\n## -------------------------------------------------------------------------\ndef CostTanh( W, b, X, Y ):\n J = 0.0\n return J\n# end def\n\n## -------------------------------------------------------------------------\ndef WDerivativeTanh( W, b, X, Y ):\n dw = [ 0.0 for i in range( len( X[ 0 ] ) ) ]\n return dw\n# end def\n\n## -------------------------------------------------------------------------\ndef BDerivativeTanh( W, b, X, Y ):\n db = 0.0\n return db\n# end def\n\n## 
-------------------------------------------------------------------------\ndef TrainLogisticRegression( X, Y, a = 1e-1, e = 1e-8 ):\n n = len( X[ 0 ] )\n W = [ random.uniform( -1, 1 ) for i in range( len( X[ 0 ] ) ) ]\n b = random.uniform( -1, 1 )\n J = CostSigmoid( W, b, X, Y )\n dJ = math.inf\n i = 0\n\n while e < dJ:\n dw = WDerivativeSigmoid( W, b, X, Y )\n db = BDerivativeSigmoid( W, b, X, Y )\n\n for j in range( n ):\n W[ j ] -= a * dw[ j ]\n # end for\n b -= a * db\n\n Jn = CostSigmoid( W, b, X, Y )\n dJ = J - Jn\n\n print( 'Iteration:', i, ': dJ =', dJ )\n \n J = Jn\n i += 1\n # end while\n\n return [ W, b ]\n# end def\n\n## -------------------------------------------------------------------------\ndef EvalLogisticRegression( W, b, x ):\n assert len( W ) == len( x )\n\n z = b\n for i in range( len( W ) ):\n z += W[ i ] * x[ i ]\n # end for\n\n return Sigmoid( z )\n\n# end def\n\n## -------------------------------------------------------------------------\nif len( sys.argv ) < 3:\n print( \"Usage:\", sys.argv[ 0 ], \"input_pgm_file output_pgm_file\" )\n sys.exit( 1 )\n# end if\n\n# Read an image and convert it to examples\npgm_image = ReadPGM( sys.argv[ 1 ] )\nX = []\nY = []\nfor i in range( len( pgm_image ) ):\n for j in range( len( pgm_image[ i ] ) ):\n X.append( [ float( i ), float( j ) ] )\n Y.append( float( pgm_image[ i ][ j ] ) )\n # end for\n# end for\n\n# Train parameters\n[ W, b ] = TrainLogisticRegression( X, Y, a = 1e-4 )\n\n# Use parameters\nfor i in range( len( pgm_image ) ):\n for j in range( len( pgm_image[ i ] ) ):\n x = [ float( i ), float( j ) ]\n h = EvalLogisticRegression( W, b, x )\n pgm_image[ i ][ j ] = int( 255.0 * h )\n # end for\n# end for\n\n# Save results\nSavePGM( sys.argv[ 2 ], pgm_image )\n\n## eof - logistic_regression_01.py\n","repo_name":"florez-l/ivqML","sub_path":"examples/02_logistic_regression/python3/logistic_regression_01.py","file_name":"logistic_regression_01.py","file_ext":"py","file_size_in_byte":5293,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"}
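For comparison, a vectorized NumPy sketch of the same sigmoid cost and gradient step (illustrative only; shapes and names mirror the script above):

```python
import numpy as np

# Vectorized equivalent of CostSigmoid / WDerivativeSigmoid / BDerivativeSigmoid.
# X: (m, n) examples, Y: (m,) labels in {0, 1}; W: (n,) weights, b: scalar bias.
def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-np.clip(z, -10, 10)))

def cost_and_grads(W, b, X, Y):
    m = X.shape[0]
    h = sigmoid(X @ W + b)
    J = -np.mean(Y * np.log(h + 1e-8) + (1 - Y) * np.log(1.0 - h + 1e-8))
    dw = X.T @ (h - Y) / m
    db = np.mean(h - Y)
    return J, dw, db

# One gradient-descent step with the same update rule as TrainLogisticRegression
rng = np.random.default_rng(0)
X = rng.uniform(size=(100, 2))
Y = (X.sum(axis=1) > 1.0).astype(float)
W, b = rng.uniform(-1, 1, size=2), rng.uniform(-1, 1)
J, dw, db = cost_and_grads(W, b, X, Y)
W, b = W - 0.1 * dw, b - 0.1 * db
```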
+{"seq_id":"34829411748","text":"def utilita():\n print ('way - перемещение между папками.')\n print('create - команда на создание папки')\n print ('delete - команда на удаление папки')\n print('exit - выход')\n\n \n\n while True:\n import Modul.easy as ex\n\n \n print(ex.tree())\n\n print('Введите команду!')\n print ('way(name,reverse=False) - перемещение между папками. Если перемещение оскществляется обратно, вместе с именем предается команда reverse = True')\n print('create(name) - команда на создание папки')\n print ('delete() - команда на удаление папки')\n print('exit - выход')\n\n command = input()\n\n if command == 'create':\n name = input('Введите имя файла: ')\n ex.create(name)\n continue\n elif command =='delete':\n name = input('Введите имя файла: ')\n ex.delete(name)\n continue\n elif command =='way':\n name = input('Введите имя файла: ')\n ret = input('up or down: ')\n if ret == 'up':\n ex.way(name)\n continue\n elif ret == 'down':\n ex.way(name,reverse = True)\n continue\n else:\n print('Такой команды нет, попробуйте еще раз!')\n continue\n elif command == 'exit':\n break\n else:\n print('Такой команды нет, попробуйте еще раз!')\n continue\n \n \n\n \n\n\n\n \n \n \n","repo_name":"AlesandroAndreev/Python_Start","sub_path":"Lesson_5/utilita.py","file_name":"utilita.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"30951817097","text":"# -*- coding: utf-8 -*-\nimport tensorflow as tf\nfrom database import dbProxy as dbProxy\nfrom pymongo import MongoClient\nfrom tensorflow import keras\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import interp1d\nfrom scipy.optimize import curve_fit\nimport tensorflow as tf\n#import high_order_layers.PolynomialLayers as poly\n\nclient = MongoClient('mongodb://10.1.30.32:27017/')\ndb = client['EBT_films_dose']\ncollectionTifProvider = db['tifProvider']\ndata1 = dbProxy.getData4CalibrationCurveWithDoseHighLimit(collectionTifProvider, facility='Co-60 (MRRC)',\n ebtLotNo='05062003',\n hoursAfterIrrad=24, doseLimit=50.0)\n\nprint(data1)\nx = []\ny = []\nfor i in data1:\n x.append(data1[i])\n y.append(i)\n\n#model = tf.keras.Sequential()\n\ninterpFunc = interp1d(x, y)\ndef interpFunc2(od, a, b, c, d, e):\n func = np.poly1d([a,b,c, d, e])\n return func(od)\n\n\n# mnist = tf.keras.datasets.mnist\n#\n# (x_train, y_train),(x_test, y_test) = mnist.load_data()\n# x_train, x_test = (x_train / 128.0-1.0), (x_test / 128.0-1.0)\n#\n# units = 20\n#\n# basis = poly.b3\n#\n# model = tf.keras.models.Sequential([\n# tf.keras.layers.Flatten(input_shape=(28, 28)),\n# poly.Polynomial(units, basis=basis, shift=0.0),\n# tf.keras.layers.LayerNormalization(),\n# poly.Polynomial(units, basis=basis, shift=0.0),\n# tf.keras.layers.LayerNormalization(),\n# poly.Polynomial(units, basis=basis, shift=0.0),\n# tf.keras.layers.LayerNormalization(),\n# poly.Polynomial(units, basis=basis, shift=0.0),\n# tf.keras.layers.LayerNormalization(),\n# tf.keras.layers.Dense(10, activation='softmax')\n# ])\n#\n# model.compile(optimizer='adam',\n# loss='sparse_categorical_crossentropy',\n# metrics=['accuracy'])\n#\n# model.fit(x_train, y_train, epochs=20, batch_size=10)\n# model.evaluate(x_test, y_test)\n\n#model = tf.keras.Sequential([tf.keras.layers.Flatten(input_shape=(1,)),\n# poly.Polynomial(50, basis=poly.b3, shift=0.0),\n# tf.keras.layers.LayerNormalization(),\n# tf.keras.layers.Dense(1, activation='softmax')\n#])\n# model = tf.keras.Sequential()\n# xLr = model.add(tf.keras.layers.Input(shape=(1,)))\n# yLr = model.add(tf.keras.layers.Dense(units=1, activation='linear'))\n#model.add(tf.keras.layers.Dense(units=200, input_dim=1))\n#model.add(tf.keras.layers.Dense(units=32, activation='relu'))\n#model.add(tf.keras.layers.Dense(units=64, activation='relu'))\n#model.add(tf.keras.layers.Dense(units=1, activation='sigmoid'))\n# #model.add(tf.keras.layers.Activation('relu'))\n# #model.add(tf.keras.layers.Dense(units=45))\n# #model.add(tf.keras.layers.Activation('relu'))\n# #model.add(tf.keras.layers.Dense(units=1))\n# #model.add(tf.keras.layers.Dense(units=1))\n# model.add(keras.layers.Activation('sigmoid'))\n# model.add(keras.layers.Dense(units=250))\n#\n# model.add(keras.layers.Activation('tanh'))\n# model.add(keras.layers.Dense(units=200))\n#\n# model.add(keras.layers.Activation('tanh'))\n# model.add(keras.layers.Dense(units=150))\n#\n# model.add(keras.layers.Activation('tanh'))\n# model.add(keras.layers.Dense(units=100))\n#\n# model.add(keras.layers.Activation('tanh'))\n# model.add(keras.layers.Dense(units=50))\n#\n# model.add(keras.layers.Activation('linear'))\n# model.add(keras.layers.Dense(units=1))\n#\n#model.compile(loss='mean_squared_error',\n# optimizer='sgd')\n# loss_fn = tf.keras.losses.MeanSquaredError(reduction='sum_over_batch_size')\n# model.compile(optimizer='adam',\n# #loss='sparse_categorical_crossentropy',\n# loss=loss_fn,\n# metrics=['accuracy'])\n#\n# 
#model.compile(#optimizer=tf.keras.optimizers.Adam(0.01),\n# # loss='mean_squared_error',\n# # #metrics=['accuracy']\n# # optimizer='sgd',\n# # )\n#\n# x1 = np.linspace(0.03, 0.7, 1000)\n# y1 = interpFunc(x1)\n# p_opt, p_cov = curve_fit(interpFunc2, x, y)\n# y2 = interpFunc2(x1, *p_opt)\n#\n# x1 = x1 / np.max(x1)\n# y2 = y2 / np.max(y2)\n#\n# model.fit(x1, y2, epochs=30, batch_size=50)\n#\nxTrain = np.linspace(0.0, 0.7, 100)\n#\n# loss_and_metrics = model.evaluate(x, y, batch_size=100)\n#\n# classes = model.predict(xTrain, batch_size=1)\n#\n# plt.plot(x, y, \"*\")\n# plt.plot(xTrain, classes)\n#\n# #plt.plot(xTrain, interpFunc(xTrain))\n# plt.show()\n#feats = tf.estimator.infer_real_valued_columns_from_input(x)\nfeats = tf.contrib.learn.infer_real_valued_columns_from_input(xTrain)\n#feats = [tf.feature_column.numeric_column(key = key) for key in x.columns]\n# Building a 3-layer DNN with 50 units each.\nclassifier_tf = tf.estimator.DNNClassifier(feature_columns=feats,\n hidden_units=[50, 50, 50],\n n_classes=3)\nclassifier_tf.fit(x, y, steps=5000)\n\npredictions = list(classifier_tf.predict(xTrain, as_iterable=True))\nplt.plot(x, y, \"*\")\nplt.plot(xTrain, predictions)\nplt.show()","repo_name":"cobaltCorsair/EBT_films_dose","sub_path":"neural/checkKeras.py","file_name":"checkKeras.py","file_ext":"py","file_size_in_byte":4937,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"}
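The commented-out experiments above target TF1-era APIs (`tf.contrib.learn`, `estimator.fit(x, y)`), which no longer exist in TensorFlow 2.x. As a hedged alternative, a minimal tf.keras regression sketch of the same idea — fitting dose versus optical density — assuming `x` and `y` are the calibration points loaded above:

```python
import numpy as np
import tensorflow as tf

# Minimal tf.keras (2.x) regression over the calibration points; the layer
# sizes and epoch count are illustrative, not tuned values from the authors.
x_arr = np.asarray(x, dtype=np.float32).reshape(-1, 1)
y_arr = np.asarray(y, dtype=np.float32).reshape(-1, 1)

model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation="tanh", input_shape=(1,)),
    tf.keras.layers.Dense(32, activation="tanh"),
    tf.keras.layers.Dense(1),  # linear output for regression
])
model.compile(optimizer="adam", loss="mse")
model.fit(x_arr, y_arr, epochs=500, verbose=0)

xTrain = np.linspace(0.0, 0.7, 100).reshape(-1, 1)
pred = model.predict(xTrain, verbose=0)
```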
+{"seq_id":"3637721097","text":"import a.sys.sys_web.mng.django_app as api\nimport time\nimport loggers\nimport datetime\nimport time\nimport warnings\nwith warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n #jpype module uses deprecated sets module\n import jpype\nimport os\nimport socket\n\nDAY = 24*3600\nGB = 1024*1024*1024\nGbps = 1e9\nMB = 1024*1024\n\n# For week, month and 3 months intervals we take one day spare\nRANGE_PARAMETERS = [\n { 'limit': 2*3600, 'col-interval': 300, 'point-interval': 60, 'tick-interval': 300 },\n { 'limit': 12*3600, 'col-interval': 30*60, 'point-interval': 300, 'tick-interval': 30*60 },\n { 'limit': 24*3600, 'col-interval': 3600, 'point-interval': 600, 'tick-interval': 3600 },\n { 'limit': 48*3600, 'col-interval': 2*3600, 'point-interval': 20*60, 'tick-interval': 2*3600 },\n { 'limit': 5*DAY, 'col-interval': 12*3600, 'point-interval': 30*60, 'tick-interval': 12*3600 },\n { 'limit': 8*DAY, 'col-interval': 12*3600, 'point-interval': 60*60, 'tick-interval': 12*3600 },\n { 'limit': 32*DAY, 'col-interval': DAY, 'point-interval': 4*3600, 'tick-interval': DAY },\n { 'limit': 93*DAY, 'col-interval': 7*DAY, 'point-interval': DAY, 'tick-interval': 7*DAY },\n ]\n\ndef getRangeParameter(from_, to_, paramName):\n delta = to_ - from_\n for e in RANGE_PARAMETERS:\n if delta <= e['limit']:\n return e[paramName]\n return RANGE_PARAMETERS[-1][paramName]\n\ndef getPointInterval(from_, to_):\n return getRangeParameter(from_, to_, 'point-interval')\n\ndef getColInterval(from_, to_):\n return getRangeParameter(from_, to_, 'col-interval')\n\ndef getIntervalUnit(from_, to_, intervalType=\"point-interval\"):\n val= getRangeParameter(from_, to_, 'tick-interval') / getRangeParameter(from_, to_, intervalType)\n loggers.accessLogger.debug(\"from_=%d, to_=%d delta=%d intervalUnit=%d\" % (from_, to_, to_-from_, val))\n return val\n\ndef time2excel(timestamp):\n dt = datetime.datetime.fromtimestamp(timestamp)\n dt1900 = datetime.datetime(1900,1,1)\n delta = dt - dt1900\n\n # Excel handles time in number of days since 1, January 1900\n # Where 1, Jan 1900 itself is 1\n # Additional +1 is fix to bug in Excel that treats 1900 as a leap year\n value = delta.days + 2 + delta.seconds/24./3600.\n return value\n\ndef colName(col):\n return chr(col+ord('A'))\ndef cellName(col, row):\n return \"%s%d\" % (colName(col), row+1)\n\ndef navigate(root, path):\n pathElements = path.split(\"/\")\n component = root\n for e in pathElements:\n if isinstance(component, list):\n key = int(e)\n else:\n key = e\n component = component[key]\n return component\n\ndef navigateOrDefault(root, path, default):\n pathElements = path.split(\"/\")\n component = root\n try:\n for e in pathElements:\n if isinstance(component, list):\n key = int(e)\n else:\n key = e\n component = component[key]\n except:\n return default\n return component\n\ndef floorVal(val, interval):\n \"\"\"\n Gets closest round value in the multiplications of intervals smaller than val\n Example:\n floorTime(3700, 3600) = 3600\n \"\"\"\n return val/interval*interval\n\ndef ceilVal(val, interval):\n \"\"\"\n Gets closest round value in the multiplications of intervals bigger than val\n Example:\n ceilTime(3700, 3600) = 7200\n \"\"\"\n return (val+interval-1)/interval*interval\n\ndef getTimezone(val):\n if time.daylight and time.localtime(val).tm_isdst:\n return time.altzone\n else:\n return time.timezone\n\ndef getCurrentTimezone():\n return getTimezone(time.time())\n\ndef floorDay(val):\n \"\"\"Returns closest day start in local timezone 
before val\"\"\"\n timezone = getTimezone(val)\n return (val - timezone) / DAY * DAY + timezone\n\ndef ceilDay(val):\n \"\"\"Returns closest day end in local timezone before val\"\"\"\n timezone = getTimezone(val)\n return (val - timezone +DAY-1) / DAY * DAY + timezone\n\ndef floorTime(t, interval):\n if interval >= DAY:\n return floorDay(t)\n elif interval >= 3600:\n return floorVal(t, 3600)\n else:\n return floorVal(t, interval)\n\ndef ceilTime(t, interval):\n if interval >= DAY:\n return ceilDay(t)\n elif interval >= 3600:\n return ceilVal(t, 3600)\n else:\n return ceilVal(t, interval)\n\nclass ExcelRange:\n def __init__(self):\n self.startRow = 0\n self.endRow = 1\n self.startCol = 0\n self.endCol = 1\n self.sheetName = None\n def setRange(self, rangeStr):\n if '!' in rangeStr:\n (sheetName,rangeStr) = rangeStr.split('!', 2)\n sheetName.strip(\"'\")\n self.sheetName = sheetName\n\n (startCell, lastCell) = rangeStr.split(':')\n self.startRow = int(startCell[1:])-1\n self.endRow = int(lastCell[1:])\n self.startCol = ord(startCell[0].lower()) - ord('a')\n self.endCol = ord(lastCell[0].lower()) - ord('a') + 1\n def setRegion(self, startRow, startCol, endRow, endCol):\n self.startRow = startRow\n self.startCol = startCol\n self.endRow = endRow\n self.endCol = endCol\n\n def setColRegion(self, col, startRow, endRow):\n self.startRow = startRow\n self.startCol = col\n self.endRow = endRow\n self.endCol = col+1\n def __str__(self):\n rangeStr = \"$%s$%d:$%s$%d\" %(colName(self.startCol), self.startRow+1, colName(self.endCol), self.endRow)\n return rangeStr if not self.sheetName else \"'%s'!%s\" % (self.sheetName, rangeStr)\n\n\nclass Requestor:\n def __init__(self):\n self.cache = {}\n\n _instance = None\n\n @staticmethod\n def instance():\n if not Requestor._instance:\n Requestor._instance = Requestor()\n return Requestor._instance\n\n def getResponse(self, request):\n response = self.cache.get(str(request), None)\n if response is None:\n response = api.apiSystemProcessSection(0, request)\n self.cache[str(request)] = response\n return response\n\n def clearCache(self):\n self.cache.clear()\n\nclass Writer:\n def __init__(self, sheetName, linkRange):\n \"\"\"\n Initialize writer sheet and cell range\n linkRange - should be of style 'A10:C20'\n \"\"\"\n parsedRange = ExcelRange()\n parsedRange.setRange(linkRange)\n\n self.sheetName = sheetName\n self.startRow = parsedRange.startRow\n self.endRow = parsedRange.endRow\n self.startCol = parsedRange.startCol\n self.endCol = parsedRange.endCol\n self.shouldUpdateDayFormat = True\n\n def createFullRange(self, startCol, startRow, endCol, endRow):\n \"\"\"\n Gets Excel range\n \"\"\"\n rangeStr = \"$%s$%d:$%s$%d\" %(colName(startCol), startRow+1, colName(endCol), endRow)\n return \"'%s'!%s\" % (self.sheetName, rangeStr) \n \n def getFullRange(self):\n \"\"\"\n Gets Excel range with for current link range\n \"\"\"\n return self.createFullRange(self.startCol, self.startRow, self.endCol-1, self.endRow)\n\n def copyCellFormat(self, workbook, fromCol, fromRow, toCol, toRow):\n rangeStyle = workbook.getRangeStyle(fromRow, fromCol, fromRow, fromCol)\n fontColor = rangeStyle.getFontColor()\n isLocked = rangeStyle.isLocked()\n loggers.accessLogger.debug(\"rangeStyle=%06x %06x isLocked=%d\" % (rangeStyle.getFontColor(), rangeStyle.getPatternBG(), rangeStyle.isLocked()) )\n rangeStyle.useAllFormat()\n # do this explicitly since useAllFormat doesn't apply font color\n rangeStyle.setFontColor(fontColor)\n rangeStyle.setLocked(True)\n 
workbook.setRangeStyle(rangeStyle, toRow, toCol, toRow, toCol)\n\n loggers.accessLogger.debug(\"copied rangeStyle=%06x %06x\" % (rangeStyle.getFontColor(), rangeStyle.getPatternBG()) )\n\n def copyRowFormat(self, workbook, fromRow, toRow):\n if fromRow == toRow:\n return\n for col in range(self.startCol, self.endCol):\n self.copyCellFormat(workbook, col, fromRow, col, toRow)\n\n def getRequest(self, paramsDecoder):\n \"\"\"returns request parameters to api\"\"\"\n raise NotImplementedError\n\n def write(self, workbook, response):\n \"\"\"Writes response to workbook. Relevant sheet is selected\"\"\"\n raise NotImplementedError\n\n def extendRows(self, workbook, newRowNumber):\n \"\"\"\n This function extends number of rows in the range to the newRowNumber\n New last row is formatted according to the old last row\n Intermidiate rows are formatted according to the (old last row -1)\n \"\"\"\n if newRowNumber == self.endRow - self.startRow:\n return\n WorkBook = jpype.JClass('com.smartxls.WorkBook')\n # copy last row format\n prevLastRow = self.endRow - 1\n self.endRow = self.startRow + newRowNumber\n newLastRow = self.endRow - 1\n if self.endRow > self.startRow+2:\n # copy endRow format only if there is more than 2 rows in the range\n self.copyRowFormat(workbook, prevLastRow, newLastRow)\n if newLastRow < prevLastRow:\n # delete extra cells\n workbook.deleteRange(newLastRow+1, self.startCol, prevLastRow, self.endCol-1, WorkBook.ShiftVertical)\n return\n # Copy intermidiate rows\n for i in range(prevLastRow,newLastRow):\n self.copyRowFormat(workbook, prevLastRow-1, i)\n\n def updateChartIntervalUnit(self, chart, intervalUnit):\n if intervalUnit <= 1:\n return\n chart.setAxisScaleType(0, 0, 2)\n chart.setScaleMajorUnitAuto(0, 0, False)\n try:\n chart.setTimeScaleMajorUnit(0, 0, 1, intervalUnit)\n chart.setTimeScaleMinorUnit(0, 0, 1, intervalUnit)\n except:\n loggers.mainLogger.warning(\"Failed to set intervalUnit to %d\" % intervalUnit)\n\n def setDayFormat(self, workbook, col, startRow, endRow):\n \"\"\"\n The function iterates over cells and updates their format so that each new day will be presented as 'mmm dd', e.g.:\n 18:00 -> 18:00\n 00:00 -> May 01\n 06:00 -> 06:00\n 12:00 -> 12:00\n 02:00 -> May 02\n \"\"\"\n if not self.shouldUpdateDayFormat:\n return\n lastDay = workbook.getNumber(startRow, col)\n dayFormat = \"[$-409]mmm dd\"\n for row in range(startRow+1, endRow):\n currentDay = workbook.getNumber(row, col)\n if int(currentDay) != int(lastDay):\n rangeStyle = workbook.getRangeStyle(row, col, row, col)\n rangeStyle.setCustomFormat(dayFormat)\n workbook.setRangeStyle(rangeStyle, row, col, row, col)\n lastDay = currentDay\n\nclass OvertimeWriter(Writer):\n def __init__(self, sheetName, linkRange, dataType, rangePrameter, scale):\n Writer.__init__(self, sheetName, linkRange)\n self.dataType = dataType\n self.scale = scale\n self.rangeParameter = rangePrameter\n\n def getRequest(self, paramsDecoder):\n self.intervalUnit = getIntervalUnit(paramsDecoder.getFromTime(), paramsDecoder.getToTime(), intervalType=self.rangeParameter)\n from_ = paramsDecoder.getFromTime()\n to_ = paramsDecoder.getToTime()\n interval = getRangeParameter(paramsDecoder.getFromTime(), paramsDecoder.getToTime(), self.rangeParameter)\n # Align from and to according to the \"strict API\"\n from_ = floorTime(from_, interval)\n to_ = ceilTime(to_, interval)\n\n return {'section': 'report',\n 'reportType': 'overTime',\n 'dataType': self.dataType,\n 'from': str(from_),\n 'to': str(to_),\n 'interval': str(interval),\n }\n\n 
def write(self, workbook, response):\n \"\"\"Writes response to workbook. Relevant sheet is selected\"\"\"\n try:\n overtimeReports = response['reports']['overTime']\n (reportType, reportData) = overtimeReports.items()[0]\n potentialPoints = reportData[\"potential\"][\"points\"]\n totalPoints = reportData[\"total\"][\"points\"]\n servedPoints = reportData[\"served\"][\"points\"]\n except:\n loggers.mainLogger.error(\"response=%s\" % response)\n raise\n numPoints = len(totalPoints)\n for i in range(numPoints):\n workbook.setNumber( i+self.startRow+1, self.startCol+0, time2excel(totalPoints[i][\"date\"]))\n workbook.setNumber(i+self.startRow+1, self.startCol+1, float(totalPoints[i][\"value\"])/self.scale)\n workbook.setNumber(i+self.startRow+1, self.startCol+2, float(potentialPoints[i][\"value\"])/self.scale)\n workbook.setNumber(i+self.startRow+1, self.startCol+3, float(servedPoints[i][\"value\"])/self.scale)\n workbook.setFormula(i+self.startRow+1, self.startCol+4, \"%s/%s\"% (cellName(self.startCol+3,i+self.startRow+1), cellName(self.startCol+1, i+self.startRow+1)))\n\n self.extendRows(workbook, numPoints+1)\n self.setDayFormat(workbook, self.startCol, self.startRow+1, self.endRow)\n chart = workbook.getChart(0)\n # last col is percentage - do not show it on graph\n self.endCol -= 1\n chart.setLinkRange(self.getFullRange(), False)\n self.updateChartIntervalUnit(chart, self.intervalUnit)\n\nclass CurrentTrafficWriter(Writer):\n def __init__(self, sheetName, linkRange):\n Writer.__init__(self, sheetName, linkRange)\n\n def getRequest(self, paramsDecoder):\n # get current time alignet to minutes\n currentTime = floorTime(api.getCurrentTime(), 60)\n return {'section': 'report',\n 'reportType': 'overTime',\n 'dataType': 'BW',\n 'from': str(currentTime - 600),\n 'to': str(currentTime),\n 'interval': '60',\n }\n\n def write(self, workbook, response):\n \"\"\"Writes response to workbook. 
Relevant sheet is selected\"\"\"\n try:\n overtimeReports = response['reports']['overTime']\n (reportType, reportData) = overtimeReports.items()[0]\n potentialPoints = reportData[\"potential\"][\"points\"]\n totalPoints = reportData[\"total\"][\"points\"]\n servedPoints = reportData[\"served\"][\"points\"]\n except:\n loggers.mainLogger.error(\"response=%s\" % response)\n raise\n numPoints = len(totalPoints)\n # Find latest available point\n if numPoints > 0:\n total = float(totalPoints[-1][\"value\"])/1e9\n if total == 0:\n total = 1e-9\n served = float(servedPoints[-1][\"value\"])/1e9\n potential = float(potentialPoints[-1][\"value\"])/1e9\n\n if self.endRow - self.startRow == 1:\n # range is horizontal\n workbook.setNumber(self.startRow, self.startCol+1, total)\n workbook.setNumber(self.startRow, self.startCol+3, served)\n if self.endCol > self.startCol+5:\n workbook.setNumber(self.startRow, self.startCol+5, potential)\n else:\n # assumed range is vertical\n workbook.setNumber(self.startRow+0, self.startCol+1, total)\n workbook.setNumber(self.startRow+1, self.startCol+1, served)\n workbook.setNumber(self.startRow+2, self.startCol+1, potential)\n\nclass BandwidthWriter(OvertimeWriter):\n def __init__(self, sheetName, linkRange):\n OvertimeWriter.__init__(self, sheetName, linkRange, \"BW\", \"point-interval\", 1e9)\n\nclass SessionsWriter(OvertimeWriter):\n def __init__(self, sheetName, linkRange):\n OvertimeWriter.__init__(self, sheetName, linkRange, \"sessions\", \"col-interval\", 1000)\n\nclass ViewTimeWriter(OvertimeWriter):\n def __init__(self, sheetName, linkRange):\n OvertimeWriter.__init__(self, sheetName, linkRange, \"viewTime\", \"col-interval\", 24*3600)\n\nclass TrafficWriter(OvertimeWriter):\n def __init__(self, sheetName, linkRange):\n Writer.__init__(self, sheetName, linkRange)\n def getRequest(self, paramsDecoder):\n from_ = paramsDecoder.getFromTime()\n to_ = paramsDecoder.getToTime()\n interval = getPointInterval(from_, to_)\n # Align from and to according to the \"strict API\"\n from_ = floorTime(from_, interval)\n to_ = ceilTime(to_, interval)\n self.intervalUnit = getIntervalUnit(from_, to_)\n return {'section': 'report',\n 'reportType': 'overTime',\n 'dataType': 'L2BW',\n 'from': str(from_),\n 'to': str(to_),\n 'interval': str(interval),\n }\n\n def write(self, workbook, response):\n \"\"\"Writes response to workbook. 
Relevant sheet is selected\"\"\"\n overtimeReports = response['reports']['overTime']\n (reportType, reportData) = overtimeReports.items()[0]\n totalPoints = reportData[\"total\"][\"points\"]\n videoPoints = reportData[\"video\"][\"points\"]\n servedPoints = reportData[\"videoServed\"][\"points\"]\n numPoints = len(totalPoints)\n for i in range(numPoints):\n workbook.setNumber(i+self.startRow+1, self.startCol+0, time2excel(totalPoints[i][\"date\"]))\n workbook.setNumber(i+self.startRow+1, self.startCol+1, float(totalPoints[i][\"value\"])/1e9)\n workbook.setNumber(i+self.startRow+1, self.startCol+2, float(videoPoints[i][\"value\"])/1e9)\n workbook.setNumber(i+self.startRow+1, self.startCol+3, float(servedPoints[i][\"value\"])/1e9)\n if self.startCol+4 < self.endCol:\n # fifth column may be a cache ratio\n workbook.setFormula(i+self.startRow+1, self.startCol+4, \"%s/%s\"% (cellName(self.startCol+3,i+self.startRow+1), cellName(self.startCol+1, i+self.startRow+1)))\n\n self.extendRows(workbook, numPoints+1)\n self.setDayFormat(workbook, self.startCol, self.startRow+1, self.endRow)\n chart = workbook.getChart(0)\n # Exclude cache ratio column if exists\n self.endCol = self.startCol + 4\n chart.setLinkRange(self.getFullRange(), False)\n self.updateChartIntervalUnit(chart, self.intervalUnit)\n\nclass ParetoWriter(Writer):\n def __init__(self, sheetName, linkRange, groupBy):\n Writer.__init__(self, sheetName, linkRange)\n self.groupBy = groupBy\n def getRequest(self, paramsDecoder):\n from_ = paramsDecoder.getFromTime()\n to_ = paramsDecoder.getToTime()\n from_ = floorTime(from_, 3600)\n to_ = ceilTime(to_, 3600)\n return {'section': 'report',\n 'reportType': 'pareto',\n 'dataType': 'volume',\n 'groupBy': self.groupBy,\n 'from': str(from_),\n 'to': str(to_),\n }\n def write(self, workbook, response):\n \"\"\"Writes response to workbook. 
Relevant sheet is selected\"\"\"\n loggers.accessLogger.debug(\"response=%s\", str(response))\n report = response['reports']['pareto'][self.groupBy]['volume']\n totalItems = report['grandTotal']['items']\n totalValue = report['grandTotal']['value']/1e9\n rowIter = self.startRow + 1\n for entry in report['entries']:\n workbook.setNumber(rowIter, self.startCol+2, entry['cumValue']/1e9)\n workbook.setNumber(rowIter, self.startCol+3, float(entry['cumItems']))\n workbook.setFormula(rowIter, self.startCol+0, \"%s/%f\" % (cellName(self.startCol+3, rowIter), totalItems))\n workbook.setFormula(rowIter, self.startCol+1, \"%s/%f\" % (cellName(self.startCol+2, rowIter), totalValue))\n rowIter += 1\n\n self.extendRows(workbook, rowIter - self.startRow)\n\n # Only first 2 columns are drawn on the chart\n self.endCol = self.startCol + 2\n chart = workbook.getChart(0)\n\n loggers.accessLogger.debug(\"..xaxisFormula = %s\" % chart.getSeriesXValueFormula(0))\n loggers.accessLogger.debug(\"..yaxisFormula = %s\" % chart.getSeriesYValueFormula(0))\n loggers.accessLogger.debug(\"..new Range: \\\"%s\\\"\" % self.createFullRange(self.startCol, self.startRow+1, self.startCol, self.endRow))\n loggers.accessLogger.debug(\"..new Range: \\\"%s\\\"\" % self.createFullRange(self.startCol+1, self.startRow+1, self.startCol+1, self.endRow))\n chart.setSeriesXValueFormula(0, self.createFullRange(self.startCol, self.startRow+1, self.startCol, self.endRow))\n chart.setSeriesYValueFormula(0, self.createFullRange(self.startCol + 1, self.startRow+1, self.startCol + 1, self.endRow))\n\nclass SubscribersParetoWriter(ParetoWriter):\n def __init__(self, sheetName, linkRange):\n ParetoWriter.__init__(self, sheetName, linkRange, \"subscribers\")\n\nclass TitlesParetoWriter(ParetoWriter):\n def __init__(self, sheetName, linkRange):\n ParetoWriter.__init__(self, sheetName, linkRange, \"titles\")\n\nclass SystemStatusWriter(Writer):\n def __init__(self, sheetName, linkRange):\n Writer.__init__(self, sheetName, linkRange)\n def getRequest(self, paramsDecoder):\n return {'section': 'system',\n 'type': 'shortStatus',\n }\n def write(self, workbook, response):\n \"\"\"Writes response to workbook. Relevant sheet is selected\"\"\"\n status = response[\"system\"][\"status\"]\n # cache\n workbook.setNumber(self.startRow+1, self.startCol+1, float(navigateOrDefault(status,\"cache/stored\", 0)))\n workbook.setNumber(self.startRow+2, self.startCol+1, float(navigateOrDefault(status,\"cache/acquired\", 0)))\n workbook.setNumber(self.startRow+3, self.startCol+1, float(navigateOrDefault(status,\"cache/delivered\", 0)))\n # software information\n workbook.setText(self.startRow+1, self.startCol+3, status[\"software\"][\"version\"])\n workbook.setText(self.startRow+2, self.startCol+3, status[\"software\"][\"license\"])\n workbook.setText(self.startRow+3, self.startCol+3, \"N/A\") # site pack\n\nclass ReportTimeWriter(Writer):\n def __init__(self, sheetName, linkRange):\n Writer.__init__(self, sheetName, linkRange)\n def getRequest(self, paramsDecoder):\n self.from_ = paramsDecoder.getFromTime()\n self.to_ = paramsDecoder.getToTime()\n return None\n def write(self, workbook, response):\n \"\"\"Writes response to workbook. 
Relevant sheet is selected\"\"\"\n        workbook.setNumber(self.startRow, self.startCol, time2excel(api.getCurrentTime()))\n        workbook.setNumber(self.startRow+1, self.startCol+0, time2excel(self.from_))\n        workbook.setNumber(self.startRow+1, self.startCol+1, time2excel(self.to_))\n\nclass HostnameWriter(Writer):\n    def __init__(self, sheetName, linkRange):\n        Writer.__init__(self, sheetName, linkRange)\n    def getRequest(self, paramsDecoder):\n        return None\n    def write(self, workbook, response):\n        \"\"\"Writes response to workbook. Relevant sheet is selected\"\"\"\n        workbook.setText(self.startRow, self.startCol, socket.gethostname())\n\nclass TopSitesWriter(Writer):\n    def __init__(self, sheetName, linkRange, dataType, scale):\n        Writer.__init__(self, sheetName, linkRange)\n        self.dataType = dataType\n        self.scale = scale\n    def getRequest(self, paramsDecoder):\n        from_ = paramsDecoder.getFromTime()\n        to_ = paramsDecoder.getToTime()\n        try:\n            countNum = paramsDecoder.getCount()\n        except:\n            countNum = 10\n        return {'section': 'report',\n                'reportType': 'top',\n                'dataType': self.dataType,\n                'groupBy': 'sites',\n                'from': str(from_),\n                'to': str(to_),\n                'count': str(countNum),\n                }\n    def write(self, workbook, response):\n        report = response['reports']['top']['sites'][self.dataType]\n        grandTotal = report[\"grandTotal\"]\n        row = self.startRow+1\n\n        for e in report['entries']:\n            workbook.setNumber(row, self.startCol+0, float(e['rank']))\n            workbook.setText(row, self.startCol+1, e['siteId'])\n            workbook.setNumber(row, self.startCol+2, float(e['total']/self.scale))\n            workbook.setNumber(row, self.startCol+3, float(e['potential']/self.scale))\n            workbook.setNumber(row, self.startCol+4, float(e['served']/self.scale))\n            row += 1\n\n        workbook.setText(row, self.startCol+1, \"Other\")\n        workbook.setFormula(row, self.startCol+2, \"%s-SUM(%s:%s)\" % (cellName(self.startCol+2,row+1), cellName(self.startCol+2,self.startRow+1), cellName(self.startCol+2, row-1) ) )\n        workbook.setFormula(row, self.startCol+3, \"%s-SUM(%s:%s)\" % (cellName(self.startCol+3,row+1), cellName(self.startCol+3,self.startRow+1), cellName(self.startCol+3, row-1) ))\n        workbook.setFormula(row, self.startCol+4, \"%s-SUM(%s:%s)\" % (cellName(self.startCol+4,row+1), cellName(self.startCol+4,self.startRow+1), cellName(self.startCol+4, row-1) ))\n\n        row += 1\n        workbook.setText(row, self.startCol+1, \"Total\")\n        workbook.setNumber(row, self.startCol+2, float(grandTotal['total']/self.scale))\n        workbook.setNumber(row, self.startCol+3, float(grandTotal['potential']/self.scale))\n        workbook.setNumber(row, self.startCol+4, float(grandTotal['served']/self.scale))\n        self.extendRows(workbook, row+1-self.startRow)\n        chart = workbook.getChart(0)\n        self.endRow = row+1\n\n        linkRange = ExcelRange()\n        linkRange.sheetName = self.sheetName\n        linkRange.setRegion(self.startRow, self.startCol+1, self.endRow-1, self.startCol+3)\n\n        chart.setLinkRange(str(linkRange), False)\n        ChartFormat = jpype.JClass('com.smartxls.ChartFormat')\n        chartFormat = chart.getPlotFormat()\n        if len(report['entries']) < 20:\n            chartFormat.setDataLabelTypes(ChartFormat.DataLabelPercentageMask | ChartFormat.DataLabelXValueMask )\n        else:\n            chartFormat.setDataLabelTypes( ChartFormat.DataLabelPercentageMask )\n        chart.setPlotFormat(chartFormat)\n\n\nclass TopSubscribersWriter(Writer):\n    def __init__(self, sheetName, linkRange, dataType, factor):\n        Writer.__init__(self, sheetName, linkRange)\n        self.dataType = dataType\n        self.factor = factor\n    def getRequest(self, paramsDecoder):\n        from_ = paramsDecoder.getFromTime()\n        to_ = 
paramsDecoder.getToTime()\n try:\n countNum = paramsDecoder.getCount()\n except:\n countNum = 10\n return {'section': 'report',\n 'reportType': 'top',\n 'dataType': self.dataType,\n 'groupBy': 'subscribers',\n 'from': str(from_),\n 'to': str(to_),\n 'count': str(countNum),\n }\n def write(self, workbook, response):\n report = response['reports']['top']['subscribers'][self.dataType]\n row = self.startRow+1\n\n for e in report['entries']:\n workbook.setNumber(row, self.startCol+0, float(e['rank']))\n workbook.setText(row, self.startCol+1, e['subscriberId'])\n workbook.setNumber(row, self.startCol+2, float(e['value']/self.factor))\n row += 1\n self.extendRows(workbook, row-self.startRow)\n\nclass TopTitlesWriter(Writer):\n def __init__(self, sheetName, linkRange, dataType, firstCol, firstColScale, secondCol, secondColScale):\n Writer.__init__(self, sheetName, linkRange)\n self.dataType = dataType\n self.firstCol = firstCol\n self.firstColScale = firstColScale\n self.secondCol = secondCol\n self.secondColScale = secondColScale\n def getRequest(self, paramsDecoder):\n from_ = paramsDecoder.getFromTime()\n to_ = paramsDecoder.getToTime()\n try:\n countNum = paramsDecoder.getCount()\n except:\n countNum = 10\n return {'section': 'report',\n 'reportType': 'top',\n 'dataType': self.dataType,\n 'groupBy': 'titles',\n 'from': str(from_),\n 'to': str(to_),\n 'count': str(countNum),\n }\n def write(self, workbook, response):\n report = response['reports']['top']['titles'][self.dataType]\n row = self.startRow+1\n\n for e in report['entries']:\n workbook.setNumber(row, self.startCol+0, float(e['rank']))\n workbook.setText(row, self.startCol+1, e['name'])\n workbook.setText(row, self.startCol+2, e['siteId'])\n sessionNum = e['sessionNumber']\n if sessionNum < 1:\n loggers.accessLogger.warning(\"Session number for title '%s' was 0 etting to 1\" % e['name'])\n sessionNum = 1\n workbook.setNumber(row, self.startCol+3, float(e[self.firstCol])/self.firstColScale)\n workbook.setNumber(row, self.startCol+4, float(e[self.secondCol])/self.secondColScale)\n workbook.setNumber(row, self.startCol+5, float(e['totalViewTime'])/sessionNum/DAY)\n workbook.setNumber(row, self.startCol+6, float(e['totalVolume'])/sessionNum/MB)\n row += 1\n self.extendRows(workbook, row-self.startRow)\n\nclass DailyDistributionWriter(Writer):\n def __init__(self, sheetName, linkRange, dataType, scale):\n Writer.__init__(self, sheetName, linkRange)\n self.dataType = dataType\n self.scale = scale\n self.timezone = getCurrentTimezone()\n def getRequest(self, paramsDecoder):\n from_ = paramsDecoder.getFromTime()\n to_ = paramsDecoder.getToTime()\n return {'section': 'report',\n 'reportType': 'distribution',\n 'distributionType': 'time',\n 'dataType': self.dataType,\n 'count': '24',\n 'interval': '3600',\n 'from': str(from_),\n 'to': str(to_),\n }\n def getLocalHour(self, time):\n return ((time - self.timezone)/3600)%24\n def write(self, workbook, response):\n try:\n report = response['reports']['distribution']['time'][self.dataType]\n totalPoints = report['total']['points']\n except:\n loggers.mainLogger.warning(\"report = %s\" % report)\n return\n\n potentialPoints = report['potential']['points'] if 'potential' in report else None\n servedPoints = report['served']['points'] if 'served' in report else None\n row = self.startRow+1\n for p in totalPoints:\n workbook.setNumber(self.startRow+1+self.getLocalHour(p['time']), self.startCol+1, float(p['value']/self.scale))\n if potentialPoints:\n for p in potentialPoints:\n 
workbook.setNumber(self.startRow+1+self.getLocalHour(p['time']), self.startCol+2, float(p['value']/self.scale))\n if servedPoints:\n for p in servedPoints:\n workbook.setNumber(self.startRow+1+self.getLocalHour(p['time']), self.startCol+3, float(p['value']/self.scale))\n\nclass ParetoOverTimeWriter(Writer):\n def __init__(self, sheetName, linkRange, groupBy, percentiles):\n Writer.__init__(self, sheetName, linkRange)\n self.groupBy = groupBy\n self.percentiles = percentiles\n\n def getRequest(self, paramsDecoder):\n from_ = paramsDecoder.getFromTime()\n to_ = paramsDecoder.getToTime()\n interval = 3600\n from_ = floorTime(from_, interval)\n to_ = ceilTime(to_, interval)\n self.intervalUnit = getRangeParameter(from_, to_, 'tick-interval')/interval\n loggers.accessLogger.debug(\"intervalUnit=%d to-from=%d\" % (self.intervalUnit, to_-from_))\n return {'section': 'report',\n 'reportType': 'overTime',\n 'dataType': 'pareto',\n 'paretoDataType': 'volume',\n 'groupBy': self.groupBy,\n 'percentiles': \",\".join(str(p) for p in self.percentiles),\n 'from': str(from_),\n 'to': str(to_),\n 'interval': str(interval),\n }\n\n def write(self, workbook, response):\n \"\"\"Writes response to workbook. Relevant sheet is selected\"\"\"\n loggers.accessLogger.info(\"Write workbook\")\n try:\n report = response['reports']['overTime']['pareto'][self.groupBy]\n except:\n loggers.mainLogger.error(\"response=%s\" % response)\n raise\n numPoints = len(self.percentiles)\n percentilePoints = {}\n for pArr in report['percentiles']:\n percentilePoints[pArr['value']] = pArr['points']\n # print times\n firstP = self.percentiles[0]\n numPoints = len(percentilePoints[firstP])\n for i in range(numPoints):\n workbook.setNumber(i+self.startRow+1, self.startCol, time2excel(percentilePoints[firstP][i][\"date\"]))\n\n totalPoints = report['total']['points']\n for i in range(numPoints):\n #totalEntities = response['total'][i][\"entities\"]\n totalValue = totalPoints[i][\"value\"]\n col = self.startCol+1\n for p in self.percentiles:\n workbook.setNumber(i+self.startRow+1, col, float(percentilePoints[p][i][\"value\"])/totalValue)\n col += 1\n self.extendRows(workbook, numPoints+1)\n self.setDayFormat(workbook, self.startCol, self.startRow+1, self.endRow)\n chart = workbook.getChart(0)\n chart.setLinkRange(self.getFullRange(), False)\n self.updateChartIntervalUnit(chart, self.intervalUnit)\n\n\nclass PeakDataRequestor:\n def __init__(self):\n pass\n\n def getRequest(self, paramsDecoder):\n from_ = paramsDecoder.getFromTime()\n to_ = paramsDecoder.getToTime()\n # align from_ and to_ to round days:\n from_ = floorDay(from_)\n to_ = ceilDay(to_)\n self.interval = getRangeParameter(from_, to_, \"col-interval\")\n if self.interval < DAY:\n self.interval = DAY\n self.intervalUnit = 1\n else:\n self.intervalUnit = getIntervalUnit(from_, to_, intervalType=\"col-interval\")\n\n return {'section': 'report',\n 'reportType': 'overTime',\n 'dataType': self.dataType,\n 'from': str(from_),\n 'to': str(to_),\n 'interval': str(self.interval),\n 'aggregationType': 'max',\n }\n\nclass PeakDataWriter(OvertimeWriter, PeakDataRequestor):\n def __init__(self, sheetName, linkRange, dataType, scale):\n OvertimeWriter.__init__(self, sheetName, linkRange, dataType, \"col-interval\", scale)\n PeakDataRequestor.__init__(self)\n self.shouldUpdateDayFormat = False\n def getRequest(self, paramsDecoder):\n return PeakDataRequestor.getRequest(self, paramsDecoder)\n\nclass PeakTrafficDataWriter(TrafficWriter, PeakDataRequestor):\n def __init__(self, sheetName, 
linkRange):\n        TrafficWriter.__init__(self, sheetName, linkRange)\n        PeakDataRequestor.__init__(self)\n        self.shouldUpdateDayFormat = False\n        self.dataType = \"L2BW\"\n    def getRequest(self, paramsDecoder):\n        return PeakDataRequestor.getRequest(self, paramsDecoder)\n\nclass PeakTopSitesWriter(TopSitesWriter):\n    def __init__(self, sheetName, linkRange, dataType, scale):\n        TopSitesWriter.__init__(self, sheetName, linkRange, dataType, scale)\n    def getRequest(self, paramsDecoder):\n        from_ = paramsDecoder.getFromTime()\n        to_ = paramsDecoder.getToTime()\n        if (to_ - from_) != 3600:\n            # send the peak request to obtain the maximum data point\n            request = {'section': 'report',\n                    'reportType': 'overTime',\n                    'dataType': 'L2BW',\n                    'from': str(from_),\n                    'to': str(to_),\n                    'interval': str(to_ - from_),\n                    'aggregationType': 'max',\n                    }\n            response = Requestor.instance().getResponse(request)\n            try:\n                maxDate = navigate(response,\"reports/overTime/L2BW/total/points/0/date\")\n                from_ = floorTime(maxDate, 3600)\n                to_ = ceilTime(maxDate+1, 3600)\n            except:\n                loggers.mainLogger.warning(\"Failed to extract max date, response=%s\" % str(response))\n            paramsDecoder.setParam(\"from\", from_)\n            paramsDecoder.setParam(\"to\", to_)\n        return TopSitesWriter.getRequest(self, paramsDecoder)\n\n\n","repo_name":"afeset/miner2-tools","sub_path":"oscar/a/sys/sys_web/mng/django_app/excel_writer.py","file_name":"excel_writer.py","file_ext":"py","file_size_in_byte":36750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
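A quick illustrative check of the two pure helpers defined near the top of this module, `time2excel` and `ExcelRange`, runnable without the SmartXLS/jpype stack (the serial-date value assumes the local timezone round-trips through `datetime.timestamp()`):

```python
import datetime

# time2excel: days since 1900-01-01, plus 2 to match Excel's serial dates
# (Excel counts 1900-01-01 as day 1 and wrongly treats 1900 as a leap year).
dt = datetime.datetime(2016, 1, 1, 12, 0)
print(time2excel(dt.timestamp()))  # 42370.5: noon on 2016-01-01

# ExcelRange parses an A1-style range into 0-indexed, end-exclusive bounds.
r = ExcelRange()
r.setRange("Sheet1!B2:D10")
print(r.sheetName, r.startCol, r.startRow, r.endCol, r.endRow)  # Sheet1 1 1 4 10
```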
+{"seq_id":"11939981072","text":"from nextcord import Embed, Colour\nimport asyncio\nfrom json import loads\nfrom threading import Thread\nfrom random import choice\n\n#Get meme in another thread for JSON module. I'm lazy\nasync def getmeme(stdout, ctx):\n stdout = loads(stdout)\n await ctx.send(embed = Embed(title = stdout[\"title\"]).set_image(url = stdout[\"url\"]))\n\n#Choose a random subreddit:\n#Get API return using asyncio.create_subprocess_shell\nasync def Meme(ctx):\n await asyncio.sleep(1)\n meme = choice([\"holup\", \"dankmemes\", \"darkmemers\", \"memes\"])\n proc = await asyncio.create_subprocess_shell(\n f\"curl https://meme-api.herokuapp.com/gimme/{meme}\",\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE)\n stdout = await proc.communicate()\n t = Thread(target = await getmeme(stdout[0].decode(), ctx))\n t.start()\n","repo_name":"Neurs12/horyzon","sub_path":"horyzon/components/JandM.py","file_name":"JandM.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"7686843593","text":"import cv2\nimport os\nimport numpy as np\n\n\ndef calculate_angle(the_lines: list) -> float:\n \"\"\"\n Calculates angle of the line(s) provided\n :param the_lines: list of lists, lines found and filtered\n :return: angle\n \"\"\"\n if len(the_lines) == 2:\n x1_1 = the_lines[0][0][0]\n y1_1 = the_lines[0][0][1]\n x2_1 = the_lines[0][1][0]\n y2_1 = the_lines[0][1][1]\n\n # Original approach\n # angle_1 = round(90 - np.rad2deg(np.arctan2(abs(y2_1 - y1_1), abs(x2_1 - x1_1))), 2)\n\n angle_1 = round(np.rad2deg(np.arctan(abs(x2_1 - x1_1) / abs(y2_1 - y1_1))), 2)\n\n x1_2 = the_lines[1][0][0]\n y1_2 = the_lines[1][0][1]\n x2_2 = the_lines[1][1][0]\n y2_2 = the_lines[1][1][1]\n\n # Original approach\n # angle_2 = round(90 - np.rad2deg(np.arctan2(abs(y2_2 - y1_2), abs(x2_2 - x1_2))), 2)\n\n angle_2 = round(np.rad2deg(np.arctan(abs(x2_2 - x1_2) / abs(y2_2 - y1_2))), 2)\n\n return round((angle_1 + angle_2) / 2, 2)\n\n else:\n x1 = the_lines[0][0][0]\n y1 = the_lines[0][0][1]\n x2 = the_lines[0][1][0]\n y2 = the_lines[0][1][1]\n\n # Original approach\n # return round(90 - np.rad2deg(np.arctan2(abs(y2 - y1), abs(x2 - x1))), 2)\n return round(np.rad2deg(np.arctan(abs(x2 - x1) / abs(y2 - y1))), 2)\n\n\nclass ResultsHandler:\n\n def __init__(\n self,\n save_path,\n line_thickness=2,\n window_name='window'\n ):\n self._window_name = window_name\n self._is_window_created = False\n self._sleeping_time = 1\n\n self._line_thickness = line_thickness\n self._font = cv2.FONT_HERSHEY_SIMPLEX\n self._font_scale = 1\n self._font_colour = (255, 0, 255)\n self._line_type = 3\n\n self.save_path = save_path\n\n @property\n def is_window_created(self):\n return self._is_window_created\n\n def create_window(self):\n cv2.namedWindow(self._window_name)\n self._is_window_created = True\n\n def show(self, image):\n cv2.imshow(self._window_name, image)\n\n def destroy_windows(self):\n cv2.destroyWindow(self._window_name)\n self._is_window_created = False\n\n def draw_lines_write_text(self,\n lines,\n image,\n angle):\n\n for line in lines:\n cv2.line(image,\n (line[0][0], line[0][1]),\n (line[1][0], line[1][1]),\n (0, 0, 255),\n self._line_thickness)\n\n cv2.putText(image,\n str(angle),\n (int(image.shape[1]*0.35), int(image.shape[0]*0.95)),\n self._font,\n self._font_scale,\n self._font_colour,\n self._line_type)\n\n return image\n\n def save_image(self,\n lines,\n image,\n image_name,\n angle):\n\n\n cv2.imwrite(os.path.join(self.save_path, image_name),\n self.draw_lines_write_text(lines, image, angle))\n\n def save_image_2(self,\n image_name,\n image):\n\n cv2.imwrite(\n os.path.join(self.save_path, image_name),\n image\n )\n\n def show_image(self,\n image,\n lines=None,\n angle=None):\n\n cv2.imshow('Extracted Image', image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n","repo_name":"EvgeniiTitov/pole_inclination_detection","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"11789782234","text":"import datetime\nimport logging\nimport time\n\nfrom data_hub.data_hub_item import DataHubItem\nfrom input.input_module import InputModule\n\n__author__ = \"Thorsten Biermann\"\n__copyright__ = \"Copyright 2015, Thorsten Biermann\"\n__email__ = \"thorsten.biermann@gmail.com\"\n\n\nclass TestDataGenerator(InputModule):\n \"\"\"\n Input module that generates test data.\n \"\"\"\n\n def __init__(self, data_hub):\n # call parent constructor\n super().__init__(data_hub=data_hub)\n\n # configure logging\n self._logger = logging.getLogger('TestDataInput')\n self._logger.info('Initializing')\n\n def run(self):\n self._logger.info('Running')\n\n while True:\n try:\n # create new item for data hub\n data_hub_item = DataHubItem('test', 'test data ' + str(datetime.datetime.now()))\n\n self._logger.debug('Genereated dummy data ' + str(data_hub_item))\n\n # hand over data hub item to data hub\n self._data_hub.put(data_hub_item)\n\n time.sleep(5)\n\n except(KeyboardInterrupt, SystemExit):\n break\n\n # close data hub queue\n self._data_hub.close()\n\n self._logger.info('Terminating')\n","repo_name":"0x74-0x62/flightbox","sub_path":"input/test_data_generator.py","file_name":"test_data_generator.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"12407462723","text":"import uuid\n\nimport numpy as np\nimport pandas as pd\n\nfrom .fakedataindex import FakeDataIndex\n\n\nclass DatetimeIndex(FakeDataIndex):\n def __init__(self, start=\"2016-01-01\", frequency: str = \"H\"):\n self.start = pd.to_datetime(start)\n self.frequency = frequency\n self.dtype = \"U20\"\n\n def generate(self, rows):\n \"\"\"\n Args:\n rows: The amount of rows to generate\n\n Returns: a series that can be used for an index\n \"\"\"\n current_date = self.start\n while True:\n yield pd.period_range(current_date, freq=self.frequency, periods=rows)\n current_date += pd.Timedelta(rows,self.frequency)\n","repo_name":"AAUDataDiscovery/FakeDataGenerator","sub_path":"FakeDataIndex/datetime_index.py","file_name":"datetime_index.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"28770912958","text":"#函数input()让程序暂停运行,等待用户输入一些文本。获取用户输入后, Python将其存储在一个变量中,以方便你使用。\nprompt = \"\\nTell me something, and I will repeat it back to you:\"\nprompt += \"\\nEnter 'quit' to end the program. \"\nmessage = \"\"\nwhile message != 'quit':\n message = input(prompt)\n print(message)\n\n#使用标志\nactive = True\nwhile active:\n message = input(prompt)\n if message == 'quit':\n active = False\n else:\n print(message)\n\n#使用 break 退出循环\nwhile active:\n message = input(prompt)\n if message == 'quit':\n break;\n print(message)\n#在循环中使用 continue\ncurrent_number = 0\nwhile current_number < 10:\n current_number += 1\n if current_number % 2 == 0:\n continue\n print(current_number)\n","repo_name":"cogoming/astartpy","sub_path":"chapter7/parrot.py","file_name":"parrot.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"3284034521","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nstudy_time = [2, 4, 6, 8, 10]\nscore = [81, 93, 91, 97, 98]\n\nx_data = np.array(study_time)\ny_data = np.array(score)\n\na = 0\nb = 0\n\nlearn_rate = 0.01\nepochs = 20000\n\nfor i in range(epochs):\n y_pred = x_data * a + b # 예측값\n error = y_data - y_pred # 실측값 - 예측값 = 오차\n \n a_diff = -(2 / len(x_data)) * sum(error * x_data)\n b_diff = -(2 / len(x_data)) * sum(error) \n\n a = a - learn_rate * a_diff\n b = b - learn_rate * b_diff\n\n if not i % 100:\n print(\"epoch: %d, 기울기: %f, 절편: %f\"%(i, a, b))\n\n\ny_pred = a * x_data + b # 최종 예측값\nplt.scatter(x_data, y_data) # 그래프에 점찍기\nplt.plot(x_data, y_pred)\nplt.show()","repo_name":"damoa-recommend/Linear-Regression","sub_path":"gradientDescent.py","file_name":"gradientDescent.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"31236435326","text":"import networkx as nx\nimport os\n\n\nALPHA = 0.8\n\ndircName = \"Calculated data\"\n\nlondon = 0\nliverpool = 0\nbirmingham = 0\nglasgowor = 0\nedinburgh = 0\ncardiff=0\nnewport=0\nswansea=0\nbelfast=0\naberdeen=0\nderry=0\nlisburn=0\n\n\ntotalLabourWeightTF = 0\ntotalConservativeWeightTF = 0\ntotalLiberalDemocratWeightTF = 0\ntotalBrexitWeight = 0\n\ntotalLabourWeight = 0\ntotalConservativeWeight = 0\ntotalLiberalDemocratWeight = 0\n\ntotalLabourPos = 0\ntotalLabourNeg = 0\ntotalConsPos = 0\ntotalConsNeg = 0\ntotalLiberalPos = 0\ntotalLiberalNeg = 0\ntotalBrexitPos = 0\ntotalBrexitNeg = 0\n\nfor subdir, dirs, files in os.walk(dircName):\n labourWeight = 0\n conservativeWeight = 0\n liberalDemocratWeight = 0\n brexitWeight = 0\n\n totalConservativeWeightTF *= ALPHA\n totalLabourWeightTF *= ALPHA\n totalLiberalDemocratWeightTF *= ALPHA\n \n #make a new graph for each batch\n G = nx.MultiGraph()\n G.add_node(\"london\", seats=.625) # single node with a value seats (seats being a form of weight)\n G.add_node(\"birmingham\", seats=.57)\n G.add_node(\"liverpool\", seats=.51)\n G.add_node(\"glasgow\", seats=.23)\n G.add_node(\"edinburgh\", seats=.18)\n G.add_node(\"cardiff\", seats=.18)\n G.add_node(\"newport\", seats=.14)\n G.add_node(\"swansea\", seats=.14)\n G.add_node(\"belfast\", seats=.12)\n G.add_node(\"aberdeen\", seats=.10)\n G.add_node(\"derry\", seats=.04)\n G.add_node(\"lisburn\", seats=.04)\n\n G.add_node(\"Labour-Party\")\n G.add_node(\"Conservative-Party\")\n G.add_node(\"Liberal-Democrats\")\n G.add_node(\"Brexit-Party\")\n\n for file in files:\n filename = os.path.abspath(os.path.join(subdir, file))\n with open(filename, 'r') as fin:\n header = fin.readline().strip().split(',')\n entries = []\n for line in fin:\n parts = line.strip().split(',')\n row = dict()\n for i, h in enumerate(header):\n row[h] = parts[i]\n\n entries.append(row)\n # while running through data assign the edges based of City, Party, sentimental value\n for (e) in entries:\n # print(\"{0}, {1}, {2} sentiment: {3}\".format(e['City'], e['Party'], e['Leader'], e['Sentiment']))\n city = e['City']\n party = e['Party']\n leader = e['Leader']\n\n #pure sentemental\n #weight = float(e['Sentiment Value'])\n\n #sentemntal with city weights\n weight = float(e['Sentiment Value'])*(G.nodes[city]['seats'])\n\n\n G.add_edge(city, party, weight=weight)\n if(party == \"\"):\n G.add_edge(city, leader, weight=weight)\n\n for (c, p, w) in G.edges.data('weight'):\n tempWeight = w\n # here we modify if the wight is negative we can change it\n if tempWeight < 0:\n tempWeight = tempWeight\n if p == \"Labour-Party\" or p == \"Jeremy-Corbyn\":\n labourWeight += tempWeight\n if w > 0:\n totalLabourPos += 1\n elif w < 0:\n totalLabourNeg += 1\n elif p == \"Conservative-Party\" or p == \"Borris-Johnson\":\n conservativeWeight += tempWeight\n if w > 0:\n totalConsPos += 1\n elif w < 0:\n totalConsNeg += 1\n elif p == \"Liberal-Democrats\" or p == \"jo-Swinson\":\n liberalDemocratWeight += tempWeight\n if w > 0:\n totalLiberalPos += 1\n elif w < 0:\n totalLiberalNeg += 1\n elif p == \"Brexit-Party\" or p == \"Nigel-Farage\":\n brexitWeight += tempWeight\n if w > 0:\n totalBrexitPos += 1\n elif w < 0:\n totalBrexitNeg += 1\n totalConservativeWeightTF += conservativeWeight\n totalLabourWeightTF += labourWeight\n totalLiberalDemocratWeightTF += liberalDemocratWeight\n\n totalConservativeWeight += conservativeWeight\n totalLabourWeight += labourWeight\n totalLiberalDemocratWeight += 
liberalDemocratWeight\n\n\n\nprint(\"conservativeWeight: \", totalConservativeWeight, \"pos: \", totalConsPos, \"neg: \", totalConsNeg)\nprint(\"labourWeight \", totalLabourWeight, \"pos: \", totalLabourPos, \"neg: \", totalLabourNeg)\nprint(\"liberalDemocratWeight \", totalLiberalDemocratWeight, \"pos: \", totalLiberalPos, \"neg: \", totalLiberalNeg)\n# print(\"brexitWeight \" , brexitWeight , \"pos: \" , totalBrexitPos , \"neg: \" , totalBrexitNeg)\n\nprint(\"conservativeWeight with time fading: \", totalConservativeWeightTF, \"pos: \", totalConsPos, \"neg: \", totalConsNeg)\nprint(\"labourWeight with time fading \", totalLabourWeightTF, \"pos: \", totalLabourPos, \"neg: \", totalLabourNeg)\nprint(\"liberalDemocratWeight with time fading \", totalLiberalDemocratWeightTF, \"pos: \", totalLiberalPos, \"neg: \", totalLiberalNeg)\n\n\n# printing nodes\n#print(\"Print all Nodes\")\n#print(G.nodes())\n#print(\"Print all edges\")\n#print(G.edges())\n\n\n\n\n\n\n\n# print(type(G.nodes()))\n# print(type(G.edges()))\n\n# print the network as a graph\n#nx.draw(G, with_labels=True, node_color='skyblue', node_size=250, edge_color='black', pos=nx.planar_layout(G))\n#plt.savefig(\"simple_path.png\")\n#plt.show()","repo_name":"adamsk34/Team-Cluster-Test-Repo","sub_path":"NETWORK-PYTHON PACKAGE/SNA.py","file_name":"SNA.py","file_ext":"py","file_size_in_byte":5124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"42667458332","text":"#스택/큐\n#프로그래머스\n#다리를 지나는 트럭\n\n\n#sum 사용하면 시간초과남\nfrom collections import deque\n\nbridge_length = 100\nweight = 100\ntruck_weights = [10]\n\ngoing = deque()\n# for i in range(0, bridge_length) :\n# going.append(0)\ngoing = deque([0 for _ in range(bridge_length)])\n \nhour = 0\nnext = 0\ngoing_weight = 0\nwhile len(going) != 0 :\n hour+=1\n out = going.popleft()\n going_weight -= out\n if truck_weights :\n #truck_weights가 빈 리스트 방지하려면 위와 같이!\n if going_weight + truck_weights[0] <= weight :\n next = truck_weights.pop(0)\n going.append(next)\n going_weight += next\n else : going.append(0)\n\nprint(hour)\n# while True :\n\n# if going_weight==0 and len(truck_weights)==0 :\n# break\n# if next == 0 and len(truck_weights) > 0 :\n# next = truck_weights.pop(0)\n# going_weight += next\n# hour+=1\n# out = going.popleft()\n# going_weight -= out\n# if going_weight + next <= weight :\n# going.append(next)\n# next= 0\n# else : going.append(0)\n# print(hour)\n\n","repo_name":"parkjeongmi/jamie_study","sub_path":"0530test/0526_3.py","file_name":"0526_3.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"3338595785","text":"#!/usr/bin/python3\n\n\"\"\" Module that defines a division in all the elements of a matrix \"\"\"\n\n\ndef matrix_divided(matrix, div):\n \"\"\"a division in all the elements of a matrix\n\n Args:\n matrix ([list]): [Matrix passed to process]\n div ([int, float]): [denominator to aply]\n\n Raises:\n TypeError: [if the matrix is not a list of lists or if rows\n does'n contain numbers ]\n TypeError: [Rows size Mismatch]\n ZeroDivisionError: [div is 0]\n \"\"\"\n\n if type(matrix) is not list or len(matrix) == 0 \\\n or not all([type(row) == list for row in matrix]):\n raise TypeError(\n \"matrix must be a matrix (list of lists) of integers/floats\")\n\n if not all([type(el) in [int, float] for row in matrix for el in row]):\n raise TypeError(\n \"matrix must be a matrix (list of lists) of integers/floats\")\n\n if any(list(map(lambda row: len(row) == 0, matrix))):\n raise TypeError(\n \"matrix must be a matrix (list of lists) of integers/floats\")\n\n if not all(list(map(lambda row: len(matrix[0]) == len(row), matrix))):\n raise TypeError(\"Each row of the matrix must have the same size\")\n\n if type(div) not in [int, float]:\n raise TypeError(\"div must be a number\")\n\n if div == 0:\n raise ZeroDivisionError(\"division by zero\")\n\n return[[(el / div).__round__(2) for el in row] for row in matrix]\n","repo_name":"JuanSebastianGB/holbertonschool-higher_level_programming","sub_path":"0x07-python-test_driven_development/2-matrix_divided.py","file_name":"2-matrix_divided.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"37269139436","text":"# -*- coding:utf-8 -*-\r\n\r\n'''\r\nCreated on 2016年1月19日\r\n\r\n@author: chch\r\n'''\r\n\r\nimport tcp\r\nimport udp\r\nimport esp\r\nimport stream\r\nfrom decode_utils import err, dec_string, str_payload_layer, hex_string, str_to_int\r\nimport ipv6_fragment\r\nfrom ip_fragment_set import IP_FRAGMENT_SET\r\n\r\nclass IPV4():\r\n def __init__(self,father):\r\n self.father=father\r\n self.layer_name='ipv4'\r\n self.version=None,\r\n self.header_length=None,\r\n self.diff_service_field=None,\r\n self.total_length=None,\r\n self.id=None\r\n self.flags=None\r\n self.dont_fragment=None\r\n self.more_fragments=None\r\n self.fragment_offset=None\r\n self.ttl=None\r\n self.protocol=None\r\n self.header_checksum=None\r\n self.src=None\r\n self.dst=None\r\n self.options=None\r\n self.payload=None\r\n self.payload_length=None\r\n self.payload_layer=None\r\n\r\n def upper_layer_selector(self,protocol_code):\r\n selector = {\r\n 6: tcp.TCP,\r\n 17: udp.UDP,\r\n }\r\n # Get the function from switcher dictionary\r\n func = selector.get(protocol_code, stream.STREAM)\r\n # Execute the function\r\n return func(self)\r\n\r\n def decode(self,packet_data,length):\r\n if length<20:\r\n err(\"ipv4 decode error: insufficient length(%s)\"%(length))\r\n return None\r\n else:\r\n self.version=ord(packet_data[0])>>4\r\n self.header_length=(ord(packet_data[0])&0x0f)*4\r\n if length < self.header_length:\r\n err(\"ipv4 header decode error:insufficient length(%s)\"%(length))\r\n return None\r\n self.diff_service_field=ord(packet_data[1])\r\n self.total_length=ord(packet_data[2])*256+ord(packet_data[3])\r\n self.id=ord(packet_data[4])*256+ord(packet_data[5])\r\n self.flags=ord(packet_data[6])>>5\r\n self.dont_fragment=(self.flags&0x02)>>1\r\n self.more_fragments=self.flags&0x01\r\n self.fragment_offset=(ord(packet_data[6])&0x1f)*256+ord(packet_data[7])\r\n self.ttl=ord(packet_data[8])\r\n self.protocol=ord(packet_data[9])\r\n self.header_checksum=ord(packet_data[10])*256+ord(packet_data[11])\r\n self.src=packet_data[12:16]\r\n self.dst=packet_data[16:20]\r\n self.options=packet_data[20:self.header_length]\r\n self.payload=packet_data[self.header_length:]\r\n self.payload_length=length-self.header_length\r\n if self.payload_length>len(self.payload):\r\n return None\r\n \r\n if self.more_fragments==0 and self.fragment_offset==0:\r\n self.payload_layer=self.upper_layer_selector(self.protocol)\r\n else: ## should be check with queued ip fragments. 
now we just take it as a stream\r\n data=IP_FRAGMENT_SET.combine(self)\r\n if data!=None:\r\n self.payload,self.payload_length=data\r\n self.payload_layer=self.upper_layer_selector(self.protocol)\r\n else:\r\n self.payload_layer=stream.STREAM(self)\r\n\r\n if self.payload_layer:\r\n if self.payload_layer.decode(self.payload, self.payload_length)==None:\r\n self.payload_layer=None\r\n\r\n return self\r\n \r\n\r\n \r\n def __str__(self):\r\n return \"ipv4(version=%s, src=%s, dst=%s, protocol=%s, payload_layer=%s)\"%(str(self.version),dec_string(self.src,4,'.'),dec_string(self.dst, 4, '.'),self.protocol,str_payload_layer(self.payload_layer))\r\n\r\n def to_dict(self):\r\n d={}\r\n d['layer_name']='ipv4'\r\n d['version']=self.version\r\n d['header_length']=self.header_length\r\n d['diff_service_field']=self.diff_service_field\r\n d['total_length']=self.total_length\r\n d['id']=self.id\r\n d['flags']=self.flags\r\n d['dont_fragment']=self.dont_fragment\r\n d['more_fragments']=self.more_fragments\r\n d['fragment_offset']=self.fragment_offset\r\n d['ttl']=self.ttl\r\n d['protocol']=self.protocol\r\n d['header_checksum']=self.header_checksum\r\n d['src']=dec_string(self.src,4,'.')\r\n d['dst']=dec_string(self.dst,4,'.')\r\n d['options']=self.options\r\n d['payload_length']=self.payload_length\r\n\r\n d['payload_layer']=self.payload_layer.to_dict() if self.payload_layer else None\r\n \r\n return d\r\n\r\nclass IPV6():\r\n def __init__(self,father):\r\n self.father=father\r\n self.layer_name='ipv6'\r\n self.version=None,\r\n self.traffic_class=None\r\n self.tc_diff_service_codepoint=None,\r\n self.tc_explicit_congestion_notification=None\r\n self.flow_label=None\r\n self.payload_length_h=None\r\n self.next_header=None\r\n self.hop_limit=None\r\n self.src=None\r\n self.dst=None\r\n self.payload=None\r\n self.payload_length=None\r\n self.payload_layer=None\r\n\r\n def upper_layer_selector(self,protocol_code):\r\n selector = {\r\n 6: tcp.TCP,\r\n 17: udp.UDP,\r\n 44: ipv6_fragment.IPV6_FRAGMENT,\r\n 50: esp.ESP\r\n }\r\n func = selector.get(protocol_code, stream.STREAM)\r\n return func(self)\r\n\r\n \r\n def decode(self,packet_data,length):\r\n if length<40:\r\n err(\"ipv6 decode error: insufficient length(%s)\"%(length))\r\n return None\r\n else:\r\n self.version=ord(packet_data[0])>>4\r\n self.traffic_class=((ord(packet_data[0])&0x0f)<<4)+((ord(packet_data[1])&0xf0)>>4)\r\n self.tc_diff_service_codepoint=self.traffic_class >> 2\r\n self.tc_explicit_congestion_notification=self.traffic_class & 0x3\r\n self.flow_label=(ord(packet_data[1])&0x0f)*65536+str_to_int(packet_data[2:4], 2)\r\n self.payload_length_h=str_to_int(packet_data[4:6], 2)\r\n self.next_header=ord(packet_data[6])\r\n self.hop_limit=ord(packet_data[7])\r\n self.src=packet_data[8:24]\r\n self.dst=packet_data[24:40]\r\n \r\n self.payload_length=length-40\r\n self.payload=packet_data[40:]\r\n if self.payload_length>len(self.payload):\r\n return None\r\n \r\n self.payload_layer = self.upper_layer_selector(self.next_header)\r\n \r\n if self.payload_layer.decode(self.payload, self.payload_length) ==None:\r\n self.payload_layer=None\r\n\r\n return self\r\n\r\n def __str__(self):\r\n return \"ipv6(version=%s, src=%s, dst=%s, next_header=%s, payload_layer=%s)\"%(str(self.version),hex_string(self.src,16,''),hex_string(self.dst, 16, ''),self.next_header,str_payload_layer(self.payload_layer))\r\n\r\n\r\n def to_dict(self):\r\n d={}\r\n d['layer_name']='ipv6'\r\n d['version']=self.version\r\n d['traffic_class']=self.traffic_class\r\n 
d['tc_diff_service_codepoint']=self.tc_diff_service_codepoint\r\n d['tc_explicit_congestion_notification']=self.tc_explicit_congestion_notification\r\n d['flow_label']=self.flow_label\r\n d['payload_length_h']=self.payload_length_h\r\n d['next_header']=self.next_header\r\n d['hop_limit']=self.hop_limit\r\n d['src']=hex_string(self.src,16,'')\r\n d['dst']=hex_string(self.dst,16,'')\r\n \r\n d['payload_length']=self.payload_length\r\n d['payload_layer']=self.payload_layer.to_dict() if self.payload_layer else None\r\n \r\n return d\r\n","repo_name":"libaoyuan242/pcap-anlysis-with-python","sub_path":"src_python/ip.py","file_name":"ip.py","file_ext":"py","file_size_in_byte":7616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"22363045216","text":"from typing import Optional, Dict, Any, List\n\nfrom openstack_query.handlers.client_side_handler import ClientSideHandler\nfrom openstack_query.handlers.prop_handler import PropHandler\nfrom openstack_query.handlers.server_side_handler import ServerSideHandler\n\nfrom enums.query.query_presets import QueryPresets\nfrom enums.query.props.prop_enum import PropEnum\n\nfrom exceptions.parse_query_error import ParseQueryError\nfrom exceptions.query_preset_mapping_error import QueryPresetMappingError\nfrom exceptions.query_property_mapping_error import QueryPropertyMappingError\n\nfrom custom_types.openstack_query.aliases import ClientSideFilterFunc, ServerSideFilters\n\n\nclass QueryBuilder:\n \"\"\"\n Helper class to handle setting and validating query parameters - primarily parsing 'where()' arguments to get\n filter function or kwarg params to use when running query\n \"\"\"\n\n def __init__(\n self,\n prop_handler: PropHandler,\n client_side_handlers: List[ClientSideHandler],\n server_side_handler: Optional[ServerSideHandler],\n ):\n self._client_side_handlers = client_side_handlers\n self._prop_handler = prop_handler\n self._server_side_handler = server_side_handler\n\n self._client_side_filter = None\n self._server_side_filters = None\n\n @property\n def client_side_filter(self) -> Optional[ClientSideFilterFunc]:\n \"\"\"\n a getter method to return the client-side filter function\n \"\"\"\n return self._client_side_filter\n\n @property\n def server_side_filters(self) -> Optional[ServerSideFilters]:\n \"\"\"\n a getter method to return server-side filters to pass to openstacksdk\n \"\"\"\n return self._server_side_filters\n\n def parse_where(\n self,\n preset: QueryPresets,\n prop: PropEnum,\n preset_kwargs: Optional[Dict[str, Any]] = None,\n ) -> None:\n \"\"\"\n method which parses and builds a filter function and (if possible) a set of openstack filter kwargs that\n corresponds to a given preset, property and set of preset arguments\n :param preset: Name of query preset to use\n :param prop: Name of property that the query preset will act on\n :param preset_kwargs: A set of arguments to pass to configure filter function and filter kwargs\n \"\"\"\n\n if self._client_side_filter:\n raise ParseQueryError(\"Error: Already set a query preset\")\n\n prop_func = self._prop_handler.get_prop_func(prop)\n if not prop_func:\n # If you are here from a search, you have likely forgotten to add it to the\n # client mapping variable in your Query object\n raise QueryPropertyMappingError(\n f\"\"\"\n Error: failed to get property mapping, given property\n {prop.name} is not supported in prop_handler\n \"\"\"\n )\n\n preset_handler = self._get_preset_handler(preset, prop)\n self._client_side_filter = preset_handler.get_filter_func(\n preset=preset,\n prop=prop,\n prop_func=prop_func,\n filter_func_kwargs=preset_kwargs,\n )\n self._server_side_filters = self._server_side_handler.get_filters(\n preset=preset, prop=prop, params=preset_kwargs\n )\n\n def _get_preset_handler(\n self, preset: QueryPresets, prop: PropEnum\n ) -> ClientSideHandler:\n \"\"\"\n method which returns a preset handler object which supports the corresponding preset and property pair\n :param preset: A given preset that describes the query type\n :param prop: A prop which the preset will act on\n \"\"\"\n\n # Most likely we have forgotten to add a mapping for a preset at the client-side\n # All presets should have a client-side handler associated to it\n if not any(i.preset_known(preset) for i in 
self._client_side_handlers):\n raise QueryPresetMappingError(\n \"A preset with no known client side handler was passed. Please raise an issue with the repo maintainer\"\n )\n\n for i in self._client_side_handlers:\n if i.check_supported(preset, prop):\n return i\n\n raise QueryPresetMappingError(\n f\"Error: failed to get preset mapping, the preset '{preset.name}' cannot be \"\n f\"used on the property '{prop.name}' given.\\n\"\n f\"If you believe it should, please raise an issue with repo maintainer\"\n )\n","repo_name":"meoflynn/st2-cloud-pack","sub_path":"lib/openstack_query/query_builder.py","file_name":"query_builder.py","file_ext":"py","file_size_in_byte":4516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"}
+{"seq_id":"34797346857","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser\nfrom django.contrib.auth.models import Permission, Group\nfrom user_interface.models import Place\n\n\nclass Manager(AbstractUser):\n id_manager = models.IntegerField(\"Код менеджера\", primary_key=True)\n place_id = models.ForeignKey(Place, on_delete=models.CASCADE)\n login = models.CharField(\"Логин\", max_length=32, unique=True)\n password = models.CharField(\"Пароль\", max_length=64)\n email = models.EmailField(max_length=320)\n surname = models.CharField(\"Фамилия\", max_length=43 )\n name = models.CharField(\"Имя\", max_length=16)\n patronymic = models.CharField(\"Отчество\", max_length=20)\n\n groups = models.ManyToManyField(\n Group,\n verbose_name=('groups'),\n blank=True,\n help_text=(\n 'The groups this user belongs to. A user will get all permissions '\n 'granted to each of their groups.'\n ),\n related_name='manager_user_set' # добавлен related_name\n )\n user_permissions = models.ManyToManyField(\n Permission,\n verbose_name=('user permissions'),\n blank=True,\n help_text=('Specific permissions for this user.'),\n related_name='manager_user_set'\n )\n user_type = 'Manager'\n\n USERNAME_FIELD = 'login'\n REQUIRED_FIELDS = []\n\n def __str__(self):\n return self.login\n\n class Meta:\n verbose_name = \"Менеджер\"\n verbose_name_plural = \"Менеджер\"\n","repo_name":"jiezzzzzzz/restaurant-booking","sub_path":"restaurants/registration_manager/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"20942512612","text":"import os\nimport pytest\nimport shutil\nimport unittest\nfrom sonLib.bioio import TestStatus\nfrom sonLib.bioio import fastaRead\nfrom sonLib.bioio import getTempDirectory\n\nfrom toil.job import Job\n\n\"\"\"Base case used for testing the preprocessor and lastz repeat masking\n\"\"\"\n\n@pytest.mark.blast\n@TestStatus.needsTestData\nclass TestCase(unittest.TestCase):\n def setUp(self):\n unittest.TestCase.setUp(self)\n self.encodeRegion = \"ENm001\"\n self.encodePath = os.path.join(TestStatus.getPathToDataSets(), \"MAY-2005\")\n self.regionPath = os.path.join(self.encodePath, self.encodeRegion)\n self.tempDir = getTempDirectory(os.getcwd())\n self.tempOutputFile = os.path.join(self.tempDir, \"results1.txt\")\n self.toilDir = os.path.join(self.tempDir, \"toil\")\n self.toilOptions = Job.Runner.getDefaultOptions(self.toilDir)\n self.toilOptions.disableCaching = True\n\n def tearDown(self):\n unittest.TestCase.tearDown(self)\n shutil.rmtree(self.tempDir)\n\n def checkSequenceSetsEqualModuloSoftMasking(self, sequences1, sequences2):\n self.assertEqual(list(sequences1.keys()), list(sequences2.keys()))\n for seqName in list(sequences1.keys()):\n sequence1 = sequences1[seqName]\n sequence2 = sequences2[seqName]\n self.assertEqual(sequence1.upper(), sequence2.upper())\n\ndef getSequences(sequenceFile):\n sequences = {}\n fileHandle = open(sequenceFile, \"r\")\n for header, sequence in fastaRead(fileHandle):\n sequences[header] = sequence\n fileHandle.close()\n return sequences\n\ndef getMaskedBases(sequences):\n maskedBases = set()\n for header in list(sequences.keys()):\n sequence = sequences[header]\n for i in range(len(sequence)):\n base = sequence[i]\n if base.upper() != base or base == 'N':\n maskedBases.add((header, i, base))\n return maskedBases\n\ndef getLowerCaseBases(sequenceFile):\n #Counts lower case bases in fasta sequences\n from sonLib.bioio import fastaRead\n totalMasked = 0\n total = 0\n fileHandle = open(sequenceFile, \"r\")\n for header, sequence in fastaRead(fileHandle):\n for base in sequence:\n if base != base.upper():\n totalMasked += 1\n total += len(sequence)\n fileHandle.close()\n return total, totalMasked\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"ComparativeGenomicsToolkit/cactus","sub_path":"src/cactus/preprocessor/preprocessorTest.py","file_name":"preprocessorTest.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","stars":426,"dataset":"github-code","pt":"72"}
+{"seq_id":"33156565220","text":"#coding:utf8\nimport tornado.ioloop\nimport tornado.web\nimport os\n\nfrom handler.index import IndexHandler\nfrom handler.article import ArticleHandler,ApiArticleHandler\nfrom handler.login import LoginHandler,ApiLoginHandler\nfrom handler.register import RegisterHandler,ApiRegisterHandler\nfrom handler.modify import ModifyHandler,ApiModifyHandler\nfrom handler.comment import ApiCommentHandler,ArticleDetailsHandler\nfrom handler.img import ImgHandler, ApiImgHandler\nfrom handler.ho import HotHandler, HourHandler\n\ndef make_app():\n basedir = os.path.dirname(__file__)\n settings = {\n 'debug': True,\n 'template_path': os.path.join(basedir, 'template')\n }\n\n return tornado.web.Application([\n (r\"/\", IndexHandler),\n (r\"/article\", ArticleHandler),\n (r\"/api/article\", ApiArticleHandler),\n (r\"/login\", LoginHandler),\n (r\"/api/login\", ApiLoginHandler),\n (r\"/register\", RegisterHandler),\n (r\"/api/register\", ApiRegisterHandler),\n (r\"/modify\", ModifyHandler),\n (r\"/api/modify\", ApiModifyHandler),\n (r\"/api/comment\", ApiCommentHandler),\n (r\"/img\", ImgHandler),\n (r\"/api/img\", ApiImgHandler),\n (r\"/hot\", HotHandler),\n (r\"/hour\", HourHandler),\n (r\"/article/(\\d+)\", ArticleDetailsHandler)\n ], **settings)\n\nif __name__ == \"__main__\":\n app = make_app()\n app.listen(5006)\n tornado.ioloop.IOLoop.current().start()\n\n\n","repo_name":"pythoner100/dlyhy3","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"69948641512","text":"\"\"\"Various helpers\"\"\"\nimport os\nimport logging\nimport discord\nfrom discord.ext.commands import DefaultHelpCommand, HelpCommand\nfrom logging.handlers import RotatingFileHandler\nfrom models.data_models import Tournament, Coach\nfrom services import TournamentService\n\nROOT = os.path.dirname(__file__)\nlogger = logging.getLogger('discord')\nlogger.setLevel(logging.INFO)\nlogger.propagate = False\nhandler = RotatingFileHandler(\n os.path.join(ROOT,'..', 'logs','discord.log'), maxBytes=1000000,\n backupCount=5, encoding='utf-8', mode='a'\n)\nhandler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))\nlogger.addHandler(handler)\n\nwastebasket_emoji = '🗑️'\n\nclass LongMessage:\n \"\"\"Class to handle long message sending in chunks\"\"\"\n def __init__(self, channel, block=False):\n self.limit = 1994 # to allow ``` before and after\n self.parts = []\n self.channel = channel\n self.block = block\n\n def add(self, part):\n \"\"\"Adds part of long message\"\"\"\n self.parts.append(part)\n\n async def send(self):\n \"\"\"sends the message to channel in limit chunks\"\"\"\n for chunk in self.chunks():\n await self.channel.send(chunk)\n logger.info(\"Response:\\n%s\", '\\n'.join(self.lines()))\n\n def lines(self):\n \"\"\"transforms the message to lines\"\"\"\n lines = []\n for part in self.parts:\n lines.extend(part.split(\"\\n\"))\n return lines\n\n def chunks(self):\n \"\"\"Transform the lines to limit sized chunks\"\"\"\n lines = self.lines()\n while True:\n msg = \"```asciidoc\\n\" if self.block else \"\"\n if not lines:\n break\n while lines and len(msg + lines[0]) < self.limit:\n msg += lines.pop(0) + \"\\n\"\n if self.block:\n msg += '```'\n yield msg\n\nasync def sign(tournament_id, coach, ctx, admin=False):\n \"\"\"routine to sign a coach to tournament\"\"\"\n if admin:\n tourn = Tournament.query.filter_by(tournament_id=tournament_id).one_or_none()\n else:\n tourn = Tournament.query.filter_by(\n status=\"OPEN\", tournament_id=tournament_id\n ).one_or_none()\n if not tourn:\n raise ValueError(\"Incorrect **tournament_id** specified\")\n\n signup = TournamentService.register(tourn, coach, admin)\n add_msg = \"\" if signup.mode == \"active\" else \" as RESERVE\"\n await ctx.send(f\"Signup succeeded{add_msg}!!!\")\n return True\n\nasync def resign(tournament_id, coach, ctx, admin=False):\n \"\"\"routine to resign a coach to tournament\"\"\"\n if admin:\n tourn = Tournament.query.filter_by(tournament_id=tournament_id).one_or_none()\n else:\n tourn = Tournament.query.filter_by(\n status=\"OPEN\", tournament_id=tournament_id\n ).one_or_none()\n\n if not tourn:\n raise ValueError(\"Incorrect **tournament_id** specified\")\n\n if TournamentService.unregister(tourn, coach, admin):\n await ctx.send(f\"Resignation succeeded!!!\")\n\n coaches = [\n discord.utils.get(ctx.guild.members, id=str(signup.coach.disc_id))\n for signup in TournamentService.update_signups(tourn)\n ]\n msg = [coach.mention for coach in coaches if coach]\n msg.append(f\"Your signup to {tourn.name} has been updated from RESERVE to ACTIVE\")\n\n if len(msg) > 1:\n tourn_channel = discord.utils.get(\n ctx.bot.get_all_channels(), name='tournament-notice-board'\n )\n if tourn_channel:\n await tourn_channel.send(\"\\n\".join(msg))\n else:\n await ctx.send(\"\\n\".join(msg))\n return True\n\nasync def coach_unique(name, ctx):\n \"\"\"finds uniq coach by name\"\"\"\n coaches = Coach.find_all_by_name(name)\n if not coaches:\n raise ValueError(f\" __{name}__ not 
found!!!\")\n\n if len(coaches) > 1:\n emsg = f\" __{name}__ not **unique**!!!\\n\"\n emsg += \"Select one: \"\n for coach in coaches:\n emsg += coach.name\n emsg += \" \"\n await ctx.send(emsg)\n return None\n return coaches[0]\n\nclass ImperiumHelpCommand(DefaultHelpCommand):\n async def send_pages(self):\n \"\"\"A helper utility to send the page output from :attr:`paginator` to the destination.\"\"\"\n destination = self.get_destination()\n for page in self.paginator.pages:\n msg = await destination.send(page)\n await msg.add_reaction(wastebasket_emoji)","repo_name":"ttrnecka/imperium","sub_path":"bot/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":4388,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"20506756822","text":"import urllib.request, urllib.parse, urllib.error\r\nfrom bs4 import BeautifulSoup\r\n\r\nurl=input(\"Enter URL:\")\r\nCount= (int)(input(\"enter count: \"))\r\nposition=(int)(input(\"enter position: \"))\r\nfor i in range(Count):\r\n\tl=[]\r\n\tu=[]\r\n\twith urllib.request.urlopen(url) as response:\r\n\t\thtml = response.read()\r\n\t\tsoup = BeautifulSoup(html, \"html.parser\")\r\n\t\ttags = soup('a')\r\n\t\tfor tag in tags:\r\n\t\t\tu.append(tag.get('href', None))\r\n\t\t\tl.append(tag.contents[0])\r\n\t\tprint(u[position-1])\r\n\t\tprint(l[position-1])\r\n\t\turl=u[position-1]\t\r\n\r\n\r\n\t\r\n\t\t\r\n\r\n\r\n\t\t\t\r\n\t\t","repo_name":"RaviBalas/coursera-Week-4-Programs-that-Surf-the-Web","sub_path":"assignment2.py","file_name":"assignment2.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"32291033686","text":"# Author: Terry Keyrouz - https://github.com/terrytyk77\n\n# Import the necessary modules\nimport functions as my_func\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom math import sqrt\n\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.linear_model import Ridge\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\n\n# Constants\nTEST_SIZE_SCALE = 0.1 # How much from 0 to 1, is attributed to the test set from all the data\nUSELESS_COLUMNS = ['id', 'wind_speed', 'atmo_opacity']\nATTRIBUTE_COLUMNS = ['sol', 'ls', 'month', 'terrestrial_day', 'terrestrial_month', 'terrestrial_year']\nGOAL_COLUMNS = ['min_temp', 'max_temp', 'pressure']\n\nMODELS = {\n 'KNeighbors Regressor': KNeighborsRegressor(n_neighbors=2),\n 'Random Forest Regressor': RandomForestRegressor(n_estimators=500),\n 'Decision Tree Regressor': DecisionTreeRegressor(),\n 'Neural Networks Regressor': MLPRegressor(random_state=1, max_iter=1000),\n 'Ridge': Ridge()\n}\n\n# Console beauty\nnp.set_printoptions(threshold=10)\n\n# Create data frame from the .csv file\nMars_DF = pd.read_csv('mars-weather-dataset.csv')\n\n# Correlation Matrix Before Pre-processing\nprint(f'\\n{my_func.Colors.YELLOW}Before Pre-processing Data Shape: {my_func.Colors.RESET}{Mars_DF.shape}')\nprint(f'{Mars_DF.info()}\\n')\nmy_func.plot_correlation_matrix(plt, 'Before Pre-processing Correlation Matrix', Mars_DF)\n\n# As we can see before pre-processing any data, we have 3 object field\n# Which means they are not very relevant in the state they are for the model\n# We will have to convert them to something more meaningful for better prediction\n# As we can see the wind_speed has 0 non-null count which marks it as safe to remove\n\n# Pre-processing the data:\n# Drop id column, because it's just an irrelevant numerical value, that doesn't relate to any field\n# Drop wind_speed column, as they are all NaN which means they are not contributing to anything\n# Drop atmo_opacity column, because they are all 100% sunny, which renders this column irrelevant for our prediction\n# Removing all these columns is removing unwanted complexity.\nMars_DF = Mars_DF.drop(columns=USELESS_COLUMNS)\n\n# Remove incomplete data, the number of incomplete rows is minimal and will improve the robustness of our model\nMars_DF.dropna(inplace=True)\n\n# Date was a string field, converting it to datetime will be usable to generate more relevant information\n# Getting the day, month, and year as Integers will be more useful to predict the weather than a simple string\nDate = Mars_DF['terrestrial_date']\nDate = pd.to_datetime(Date)\nMars_DF['terrestrial_day'] = Date.dt.day\nMars_DF['terrestrial_month'] = Date.dt.month\nMars_DF['terrestrial_year'] = Date.dt.year\n\n# Drop the column 'terrestrial_date' as it won't be used anymore\nMars_DF = Mars_DF.drop(columns=['terrestrial_date'])\n\n# Convert the 'month' column into an int, by removing the word 'Month ' and converting it afterwards,\n# this will render the field 'month' more useful for the prediction\nMars_DF['month'] = np.int64(Mars_DF['month'].str.replace('Month ', ''))\n\n# Correlation Matrix After Pre-processing\nprint(f'{my_func.Colors.GREEN}After Pre-processing Data Shape: 
{my_func.Colors.RESET}{Mars_DF.shape}')\nprint(f'{Mars_DF.info()}\\n')\nmy_func.plot_correlation_matrix(plt, 'After Pre-processing Correlation Matrix', Mars_DF)\n\n# Create training sets\nX = Mars_DF.drop(columns=GOAL_COLUMNS)\nY = Mars_DF.drop(columns=ATTRIBUTE_COLUMNS)\n\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=TEST_SIZE_SCALE)\n\n# Test all the models\nfor key, model in MODELS.items():\n model.fit(X_train, Y_train)\n Y_predictions = model.predict(X_test)\n\n print(f'{my_func.Colors.CYAN}Algorithm: {my_func.Colors.RESET}{key}')\n print(f'{my_func.Colors.YELLOW}Actual Data:\\n{my_func.Colors.RESET}{Y_test}\\n')\n print(f'{my_func.Colors.GREEN}Predicted Data:\\n{my_func.Colors.RESET}{Y_predictions}\\n')\n\n mean_squared_error_score = mean_squared_error(Y_test, Y_predictions)\n root_mean_squared_error_score = sqrt(mean_squared_error_score)\n mean_absolute_error_score = mean_absolute_error(Y_test, Y_predictions)\n r_squared_coefficient_score = r2_score(Y_test, Y_predictions)\n\n print(f'{my_func.Colors.GREEN}MSE: {my_func.Colors.RESET}{mean_squared_error_score:.3f}')\n print(f'{my_func.Colors.GREEN}RMSE: {my_func.Colors.RESET}{root_mean_squared_error_score:.3f}')\n print(f'{my_func.Colors.GREEN}MAE: {my_func.Colors.RESET}{mean_absolute_error_score:.3f}')\n print(f'{my_func.Colors.GREEN}RSC: {my_func.Colors.RESET}{r_squared_coefficient_score:.3f}\\n')\n","repo_name":"terrytyk77/Machine-Learning-Mars","sub_path":"ML_Mars/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"19885643892","text":"import openc2\nimport pytest\nimport json\nimport sys\n\n\ndef test_slpf_actuator():\n foo = openc2.v10.SLPFActuator(hostname=\"hostname\")\n assert foo\n assert foo.hostname == \"hostname\"\n\n with pytest.raises(openc2.exceptions.ExtraPropertiesError):\n foo = openc2.v10.SLPFActuator(bad=\"bad\")\n\n foo = openc2.v10.SLPFActuator(named_group=\"named_group\")\n assert foo != None\n assert foo.named_group == \"named_group\"\n\n foo = openc2.v10.SLPFActuator(asset_id=\"asset_id\")\n assert foo != None\n assert foo.asset_id == \"asset_id\"\n\n # with the current specification SLPFActuator does not need any elements\n # and an empty asset_tuple is None\n foo = openc2.v10.SLPFActuator(asset_tuple=[])\n assert foo != None\n\n with pytest.raises(AttributeError):\n foo.asset_tuple\n\n # check that there is less than 10\n for s in range(1, 10):\n values = list(map(lambda x: str(x), list(range(s))))\n f = openc2.v10.SLPFActuator(asset_tuple=values)\n assert f.asset_tuple == values\n\n # max 10 items\n with pytest.raises(openc2.exceptions.InvalidValueError):\n openc2.v10.SLPFActuator(\n asset_tuple=list(map(lambda x: str(x), list(range(11))))\n )\n\n\ndef test_slpf_cmd():\n with pytest.raises(openc2.exceptions.MissingPropertiesError):\n openc2.v10.slpf.SLPF()\n\n with pytest.raises(openc2.exceptions.MissingPropertiesError):\n openc2.v10.slpf.SLPF(action=\"query\")\n\n with pytest.raises(openc2.exceptions.MissingPropertiesError):\n openc2.v10.slpf.SLPF(target=openc2.v10.Features())\n\n foo = openc2.v10.slpf.SLPF(action=\"query\", target=openc2.v10.Features())\n assert foo != None\n assert foo.action == \"query\"\n\n with pytest.raises(openc2.exceptions.InvalidValueError):\n openc2.v10.slpf.SLPF(action=\"create\", target=openc2.v10.Features())\n\n with pytest.raises(ValueError):\n openc2.v10.slpf.SLPF(action=\"query\", target=openc2.v10.targets.URI(uri=\"uri\"))\n\n foo = openc2.v10.slpf.SLPF(\n action=\"query\",\n target=openc2.v10.Features(),\n actuator=openc2.v10.SLPFActuator(hostname=\"hostname\"),\n )\n assert foo != None\n assert foo.action == \"query\"\n assert foo.actuator.hostname == \"hostname\"\n\n @openc2.v10.CustomActuator(\"x-thing\", [(\"id\", openc2.properties.StringProperty())])\n class MyCustomActuator(object):\n pass\n\n with pytest.raises(ValueError):\n openc2.v10.slpf.SLPF(\n action=\"query\",\n target=openc2.v10.Features(),\n actuator=MyCustomActuator(id=\"id\"),\n )\n","repo_name":"oasis-open/openc2-lycan-python","sub_path":"tests/test_slpf.py","file_name":"test_slpf.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"72"}
+{"seq_id":"4211490029","text":"from set import MySet\nfrom bloomFilter import BloomFilter\nimport math\nimport random\nimport string\nfrom timeit import default_timer as timer\n\nepsilon = 1e-2\nM = 29717\nN = int(- M * math.log(epsilon) / math.log(2) / math.log(2))\nK = int(math.log(2, math.e) * N / M)\n\n\ndef data():\n blacklist = set()\n for line in open('blacklist.txt'):\n blacklist.add(line.strip())\n return blacklist\n\n\ndef test_generator(data):\n test = random.sample(data, 500)\n for _ in range(500):\n len = random.randint(3, 10)\n random_str = ''.join(random.choice(string.ascii_letters) for _ in range(len))\n test.append(random_str)\n return test\n\n\ndef insertComparison(myset, bf):\n print('-------------------insert-------------------')\n names = {'james', 'jack', 'sue', 'luis', 'kevin', 'charlotte', 'leon', 'neo', 'destiny', 'paul'}\n tic = timer()\n for name in names:\n myset.insert(name)\n toc = timer()\n print(f'Set Time: {(toc - tic)*1e6/len(names)}us')\n tic = timer()\n for name in names:\n bf.insert(name)\n toc = timer()\n print(f'Bloom Filter Time: {(toc - tic)*1e6/len(names)}us')\n\n\ndef findComparison(names, myset, bf):\n print('-------------------find-------------------')\n set_result = []\n bf_result = []\n tic = timer()\n for name in names:\n set_result.append(myset.find(name))\n toc = timer()\n print(f'Set Time: {(toc - tic)*1e6/len(names)}us')\n tic = timer()\n for name in names:\n bf_result.append(bf.find(name))\n toc = timer()\n print(f'Bloom Filter Time: {(toc - tic)*1e6/len(names)}us')\n count = 0\n for i in range(len(set_result)):\n if set_result[i] == bf_result[i]:\n count += 1\n print(f'Accuracy: {float(count)/len(set_result)}')\n\n\ndef main():\n corpus = data()\n myset = MySet(corpus)\n bf = BloomFilter(corpus, K, N)\n insertComparison(myset, bf)\n test = test_generator(corpus)\n findComparison(test, myset, bf)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"DraymondHIT/HIT-Advanced-Algorithms","sub_path":"Lab3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
+{"seq_id":"30135151836","text":"#!/usr/bin/python3\nimport sys\nimport time\n\nw = open(sys.argv[1], \"rb\")\nr = open(sys.argv[2], \"rb\")\nr.seek(0,2)\nw.seek(0,2)\nwhile(True):\n ri = r.read()\n if len(ri) > 0:\n print(\"R:\\t\", end=\"\", flush=True)\n for i in range(0, len(ri)):\n print(\"{:02x}\".format(ri[i]), end=\"\", flush=True)\n print(\"\\n\", flush=True)\n wi = w.read()\n if len(wi) > 0:\n print(\"W:\\t\", end=\"\", flush=True)\n for i in range(0, len(wi)):\n print(\"{:02x}\".format(wi[i]), end=\"\", flush=True)\n print(\"\", flush=True)\n","repo_name":"barleto/RVMON","sub_path":"helpers/liveuart.py","file_name":"liveuart.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"14201999481","text":"def chain(num):\n if num <= 0:\n return [0]\n seq = [int(num)]\n while 1 not in seq and 89 not in seq:\n seq.append(sum([int(c) ** 2 for c in str(seq[-1])]))\n return seq\n\n\ntarget = 10_000_000\namount = 0\nfor i in range(target):\n amount += 1 if chain(i)[-1] == 89 else 0\n if target > 10000:\n if i % 10000 == 0:\n print(\"Current i: %s\" % i)\n","repo_name":"Lordfirespeed/BunchaPythonStuff","sub_path":"Project Euler/#92 - Square Digit Chains.py","file_name":"#92 - Square Digit Chains.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"36098774420","text":"import itertools\nimport math\nimport re\n\n\ndef main():\n file = open(\"input\", \"r\")\n positions = [[int(x) for x in re.findall(\"-?[0-9]+\", line)]\n for line in file]\n initPositions = list(\n zip(positions[0], positions[1], positions[2], positions[3]))\n print(initPositions)\n\n velocities = [[0, 0, 0] for _ in range(len(positions))]\n file.close()\n\n combinations = list(itertools.combinations(range(len(positions)), 2))\n\n for step in range(600000):\n # for moon, vel in enumerate(velocities):\n # if not sum(map(abs, vel)):\n # print(f\"{moon}\\tStep: {step}\\t{positions[moon]}\")\n\n # Calculate Velocity Vectors\n for first, second in combinations:\n firstPos = positions[first]\n firstVel = velocities[first]\n secondPos = positions[second]\n secondVel = velocities[second]\n\n for i in range(len(firstPos)):\n if firstPos[i] < secondPos[i]:\n firstVel[i] += 1\n secondVel[i] -= 1\n elif firstPos[i] > secondPos[i]:\n firstVel[i] -= 1\n secondVel[i] += 1\n\n # Calculate Position Vectors\n for i, (pos, vel) in enumerate(zip(positions, velocities)):\n positions[i] = [sum(p) for p in zip(pos, vel)]\n\n for i in range(len(initPositions)):\n axisVector = tuple([pos[i] for pos in positions])\n # print(axisVector)\n if initPositions[i] == axisVector:\n print(i, step)\n\n # totalEnergy = sum(sum(map(abs, pos)) * sum(map(abs, vel))\n # for pos, vel in zip(positions, velocities))\n\n # print(totalEnergy)\n\n\ndef lcm(a, b):\n return a*b // math.gcd(a, b)\n\n\ndef task2():\n # x axis\n # 572663 - 286331 = 286332\n\n # y axis\n # 386103 - 193051 = 193052\n\n # z axis\n # 204711 - 102355 = 102356\n x = 286332\n y = 193052\n z = 102356\n\n lcmxy = lcm(x, y)\n lcmxyz = lcm(lcmxy, z)\n print(lcmxyz)\n\n\nif __name__ == \"__main__\":\n task2()\n # main()\n","repo_name":"EliTheCreator/AdventOfCode","sub_path":"2019/day12/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"170789919","text":"import logging\nimport os\nimport subprocess\n\nimport retrying\nimport yatest.common\n\nlog = logging.getLogger(__name__)\nwork_dir = yatest.common.work_path()\n\n\n@retrying.retry(\n retry_on_result=bool,\n wait_exponential_multiplier=1000, wait_exponential_max=10000,\n)\ndef wait_docker_container(container_name):\n cmd = ['docker', 'inspect', '-f', '\\'{{.State.Running}}\\'', container_name]\n res = subprocess.call(cmd)\n return res != 0\n\n\ndef destroy_docker_container(container_name):\n try:\n cmd = ['docker', 'stop', container_name]\n yatest.common.execute(cmd, shell=False, wait=True, cwd=work_dir)\n except Exception as e:\n log.error('Docker stop error: %s', e)\n try:\n cmd = ['docker', 'rm', container_name]\n yatest.common.execute(cmd, shell=False, wait=True, cwd=work_dir)\n except Exception as e:\n log.error('Docker rm error: %s', e)\n\n\ndef start_daemon(arc_bin_path, arc_conf_path):\n template_identifier_bin = yatest.common.binary_path(arc_bin_path)\n config = yatest.common.source_path(arc_conf_path)\n cmd = [template_identifier_bin, '--config', config]\n return yatest.common.execute(\n cmd, shell=False, check_sanitizer=True, wait=False, cwd=work_dir\n )\n\n\ndef shutdown_daemon(proc_name):\n return os.system('pkill -9 {proc_name}'.format(**locals()))\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"mail/tests_helpers/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"7836490986","text":"# -*- coding: utf-8 -*\r\n# @Time : 2023/9/4 10:45\r\n# @Author : 杨坤林\r\n# @File : PhaseLoss.py\r\n# @Software : PyCharm\r\nimport torch\r\nimport torch.nn as nn\r\n\r\n\r\n# 定义相位损失函数\r\nclass PhaseLoss(nn.Module):\r\n def __init__(self):\r\n super(PhaseLoss, self).__init__()\r\n\r\n def forward(self, predicted, target):\r\n # 计算傅里叶变换\r\n predicted_fft = torch.fft.fft2(predicted, dim=(-2, -1))\r\n target_fft = torch.fft.fft2(target, dim=(-2, -1))\r\n\r\n # 提取相位信息\r\n predicted_phase = torch.angle(predicted_fft)\r\n target_phase = torch.angle(target_fft)\r\n\r\n # 归一化相位信息到 [-π, π] 范围内\r\n predicted_phase = predicted_phase - 2 * torch.pi * torch.round(predicted_phase / (2 * torch.pi))\r\n target_phase = target_phase - 2 * torch.pi * torch.round(target_phase / (2 * torch.pi))\r\n\r\n # 计算相位损失\r\n phase_loss = torch.mean(torch.abs(predicted_phase - target_phase))\r\n\r\n return phase_loss\r\n\r\n\r\n# # 示例用法\r\n# if __name__ == '__main__':\r\n# # 创建两个示例特征图(大小相同)\r\n# predicted_feature_map = torch.randn(1, 1, 64, 64) # 示例的预测特征图\r\n# target_feature_map = torch.randn(1, 1, 64, 64) # 示例的目标特征图\r\n#\r\n# # 初始化相位损失函数\r\n# phase_loss_fn = PhaseLoss()\r\n#\r\n# # 计算相位损失\r\n# loss = phase_loss_fn(predicted_feature_map, target_feature_map)\r\n#\r\n# print(\"相位损失:\", loss.item())","repo_name":"kleenY/TCIR2MW","sub_path":"models/PhaseLoss.py","file_name":"PhaseLoss.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"15113793719","text":"def gcd(a,b):\r\n if a>=b:\r\n if a%b==0:\r\n return b\r\n else:\r\n return gcd(a%b,b)\r\n else:\r\n if b%a==0:\r\n return a\r\n else:\r\n return gcd(a,b%a)\r\n \r\nN=int(input())\r\nHP=[int(i) for i in input().split()]\r\nans=HP[0]\r\nfor i in range(1,N):\r\n ans=gcd(ans,HP[i])\r\nprint(ans)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/abc118/C/4922851.py","file_name":"4922851.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"}
+{"seq_id":"23513770641","text":"import unittest2 as unittest\nfrom zExceptions import Redirect\n\n\nclass TestOpenIdExtraction(unittest.TestCase):\n identity = \"http://plone.myopenid.com\"\n server_response={\n \"openid.mode\" : \"id_res\",\n \"nonce\" : \"nonce\",\n \"openid.identity\" : \"http://plone.myopenid.com\",\n \"openid.assoc_handle\" : \"assoc_handle\",\n \"openid.return_to\" : \"return_to\",\n \"openid.signed\" : \"signed\",\n \"openid.sig\" : \"sig\",\n \"openid.invalidate_handle\" : \"invalidate_handle\",\n }\n\n def createPlugin(self):\n from plone.openid.tests.utils import MockPAS\n from plone.openid.tests.utils import MockSite\n from plone.openid.plugins.oid import OpenIdPlugin\n plugin=OpenIdPlugin(\"openid\")\n return plugin.__of__((MockPAS()).__of__(MockSite()))\n\n\n def testEmptyExtraction(self):\n \"\"\"Test if we do not invent credentials out of thin air.\n \"\"\"\n plugin=self.createPlugin()\n creds=plugin.extractCredentials(plugin.REQUEST)\n self.assertEqual(creds, {})\n\n\n def testEmptyStringIdentityExtraction(self):\n \"\"\"Test coverage for bug #7176. In the case where \"\" (i.e an empty\n string) is passed in as the identity via the request,\n we essentially want to ensure that a Redirect isn't raised, which\n would signify that an IOpenIdExtractionPlugin challenge was initialized.\n\n This test demonstrates our openid plugin's extractCredentials eliminates\n credentials that aren't in the openid.* namespace.\n \"\"\"\n plugin=self.createPlugin()\n plugin.REQUEST.form.update(self.server_response)\n plugin.REQUEST.form[\"__ac_identity_url\"]=\"\"\n creds=plugin.extractCredentials(plugin.REQUEST)\n self.assertFalse(creds.has_key(\"__ac_identity_url\"))\n\n @unittest.skip(\"This test fails randomly on Jenkins\")\n def testLeadingWhiteSpacesInIdentityExtraction(self):\n \"\"\"Test coverage for bug #11044. Cope with leading/trailing spaces.\n If a user has no concept of \"trailing whitespace\", it's hard to make\n him care about not hitting space in the wrong place.\"\"\"\n plugin=self.createPlugin()\n plugin.REQUEST.form.update(self.server_response)\n plugin.REQUEST.form[\"__ac_identity_url\"]=\" %s\" % self.identity\n self.assertRaises(Redirect,\n plugin.extractCredentials,\n plugin.REQUEST)\n\n @unittest.skip(\"This test fails randomly on Jenkins\")\n def testTrailingWhiteSpacesInIdentityExtraction(self):\n \"\"\"Test coverage for bug #11044. 
Cope with leading/trailing spaces.\n        If a user has no concept of \"trailing whitespace\", it's hard to make\n        him care about not hitting space in the wrong place.\"\"\"\n        plugin=self.createPlugin()\n        plugin.REQUEST.form.update(self.server_response)\n        plugin.REQUEST.form[\"__ac_identity_url\"]=\"%s \" % self.identity\n        self.assertRaises(Redirect,\n                          plugin.extractCredentials,\n                          plugin.REQUEST)\n\n    @unittest.skip(\"This test fails randomly on Jenkins\")\n    def testRedirect(self):\n        \"\"\"Test if a redirect is generated for a login attempt.\n        This test requires a working internet connection!\n        \"\"\"\n        plugin=self.createPlugin()\n        plugin.REQUEST.form[\"__ac_identity_url\"]=self.identity\n        self.assertRaises(Redirect,\n                          plugin.extractCredentials,\n                          plugin.REQUEST)\n\n\n    def testPositiveOpenIdResponse(self):\n        \"\"\"Test if a positive authentication is extracted.\n        \"\"\"\n        plugin=self.createPlugin()\n        plugin.REQUEST.form.update(self.server_response)\n        creds=plugin.extractCredentials(plugin.REQUEST)\n        self.assertEqual(creds[\"openid.identity\"], self.identity)\n        self.assertEqual(creds[\"openid.mode\"], \"id_res\")\n        self.assertEqual(creds[\"openid.return_to\"], \"return_to\")\n\n\n    def testNegativeOpenIdResponse(self):\n        \"\"\"Check if a cancelled authentication request is correctly ignored.\n        \"\"\"\n        plugin=self.createPlugin()\n        plugin.REQUEST.form.update(self.server_response)\n        plugin.REQUEST.form[\"openid.mode\"]=\"cancel\"\n        creds=plugin.extractCredentials(plugin.REQUEST)\n        self.assertEqual(creds, {})\n\n    @unittest.skip(\"This test fails randomly on Jenkins\")\n    def testFormRedirectPriorities(self):\n        \"\"\"Check if a new login identity has preference over openid server\n        response.\n        \"\"\"\n        plugin=self.createPlugin()\n        plugin.REQUEST.form.update(self.server_response)\n        plugin.REQUEST.form[\"__ac_identity_url\"]=self.identity\n        self.assertRaises(Redirect,\n                          plugin.extractCredentials, plugin.REQUEST)\n\n\n\ndef test_suite():\n    from unittest import TestSuite, makeSuite\n    suite=TestSuite()\n    suite.addTest(makeSuite(TestOpenIdExtraction))\n    return suite\n","repo_name":"plone/plone.openid","sub_path":"plone/openid/tests/testExtraction.py","file_name":"testExtraction.py","file_ext":"py","file_size_in_byte":5018,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"}
+{"seq_id":"1673628538","text":"# Функция поиска HCF\r\n#Использование алгоритма Евклида\r\n\r\ndef compute_hcf(x, y):\r\n while (y):\r\n x,y = y, x % y\r\n return x\r\n\r\nhcf = compute_hcf(300,400)\r\nprint (\"The HCF is\", hcf)","repo_name":"WaiperOK/HCF","sub_path":"hcf.py","file_name":"hcf.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"40168149930","text":"from dataclasses import dataclass, field\nfrom typing import List, Union\n\nimport numpy as np\nfrom bokeh.io import export_png, export_svg\nfrom bokeh.models import ColumnDataSource, LinearColorMapper, ColorBar, Range1d\nfrom bokeh.palettes import Viridis256\nfrom bokeh.plotting import figure, show\nfrom scipy.spatial import Voronoi\n\nfrom utils import Point\n\n\n@dataclass\nclass VoronoiCell:\n centroid: Point\n vertices: List[Point]\n\n\n@dataclass\nclass VoronoiTessellation:\n centroids: List[Point]\n tessellation: List[VoronoiCell] = field(init=False)\n cells: List[VoronoiCell] = field(init=False)\n M: float = field(init=False, default=20.)\n outer_box: List[Point] = field(init=False)\n\n def __post_init__(self):\n self.outer_box = [np.array([0, -self.M]), np.array([0, self.M]), np.array([self.M, 0]), np.array([-self.M, 0])]\n # self.outer_box = [np.array([-self.M, -self.M]), np.array([-self.M, self.M]), np.array([self.M, -self.M]), np.array([self.M, self.M])]\n self.build_tessellation()\n\n def build_tessellation(self):\n pts = self.centroids.copy() + self.outer_box\n vor = Voronoi(pts)\n\n self.cells = []\n for i in range(len(self.centroids)):\n region_number = vor.point_region[i]\n vertices = [[vor.vertices[index][0], vor.vertices[index][1]] for index in vor.regions[region_number]]\n self.cells.append(VoronoiCell(centroid=self.centroids[i], vertices=vertices))\n\n def update_cells(self, centroids: List[Point]):\n self.centroids = centroids.copy()\n self.build_tessellation()\n\n def save_quantization_bokeh(self, name, probabilities, show_color_bar=True):\n plot = self.plot_quantization_bokeh(probabilities, show_color_bar=show_color_bar)\n export_svg(plot, filename=name+'.svg')\n export_png(plot, filename=name+'.png')\n\n def show_quantization_bokeh(self, probabilities: Union[List[float], None], show_color_bar=True):\n plot = self.plot_quantization_bokeh(probabilities, show_color_bar=show_color_bar)\n show(plot)\n\n def plot_quantization_bokeh(self, probabilities: List[float], show_color_bar=True):\n size_dots = 25 if len(self.cells) < 75 else 20\n\n width_color_bar = 20\n palette = Viridis256\n\n x_vertices = list()\n y_vertices = list()\n x_centroid = list()\n y_centroid = list()\n for i, cell in enumerate(self.cells):\n x_vertices.append([s[0] for s in cell.vertices])\n y_vertices.append([s[1] for s in cell.vertices])\n x_centroid.append(cell.centroid[0])\n y_centroid.append(cell.centroid[1])\n\n df = dict()\n df['x_vertices'] = x_vertices\n df['y_vertices'] = y_vertices\n df['x_centroid'] = x_centroid\n df['y_centroid'] = y_centroid\n # max_weight = max(probabilities)\n # df['ws'] = [w/max_weight for w in grid.cellsWeight]\n if probabilities is not None:\n df['ws'] = probabilities\n else:\n df['ws'] = range(0, len(self.cells))\n\n dfsource = ColumnDataSource(data=df)\n\n # palette.reverse()\n color_mapper = LinearColorMapper(palette=palette, low=min(df['ws']), high=max(df['ws']))\n plot = figure(\n plot_width=600+width_color_bar+30 if show_color_bar else 600,\n plot_height=600\n )\n left, right, bottom, top = -5, 5, -5, 5\n plot.x_range = Range1d(left, right)\n plot.y_range = Range1d(bottom, top)\n plot.patches(\n xs='x_vertices',\n ys='y_vertices',\n source=dfsource,\n line_width=2,\n fill_color={'field': 'ws', 'transform': color_mapper}\n )\n plot.dot(\n x='x_centroid',\n y='y_centroid',\n source=dfsource,\n size=size_dots,\n color='red'\n )\n if show_color_bar:\n color_bar = ColorBar(color_mapper=color_mapper, width=width_color_bar, 
location=(0, 0))\n plot.add_layout(color_bar, 'right')\n return plot\n","repo_name":"montest/stochastic-methods-optimal-quantization","sub_path":"tessellation.py","file_name":"tessellation.py","file_ext":"py","file_size_in_byte":4076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"22574812691","text":"EVENT = \"event\"\nEMAIL = \"email\"\nUSER = \"user\"\nID = \"id\"\nCARD = \"card\"\nNOT_ESTIMATED = \"not estimated\"\nMIN_TICKET_ESTIMATION = \"min_ticket_estimation\"\nMAX_TICKET_ESTIMATION = \"max_ticket_estimation\"\nAVG_TICKET_ESTIMATION = \"avg_ticket_estimation\"\nNOBODY_TICKET_ESTIMATION = \"TICKET IS NOT ESTIMATED BY ANYONE\"\nSTART_TIMER = \"start_timer\"\nSKIP_TICKET = \"skip_ticket\"\nUSERS_ESTIMATION = \"users_estimation\"\nFINAL_ESTIMATION = \"final_estimation\"\nESTIAMTION = \"estimation\"\nEND_GAME = \"end_game\"\nTICKET_ANALYSIS = \"ticket_analysis\"\nFETCH_TICKET = \"fetch_tickets\"\nGET_CURRENT_TICKET = \"get_current_ticket\"\nCARD_SELECTED = \"card_selected\"\nESTIMATED_CARD = \"estimate_card\"\nMANAGER_ROOM = \"manager_room\"\nPOKERBOARD = \"pokerboard\"\nSESSION = \"session\"\nDATA = \"data\"\nSENDER_CHANNEL_NAME = \"sender_channel_name\"\nCARD_SELECTED_BY_PLAYER = \"card_selected_by_player\"\nCARD_SELECTED_BY_PLAYER_FOR_MANAGER = \"card_selected_by_player_for_manager\"\nROLE = \"role\"\nPLAYER = 0\nNOT_INVITE = 'You are not invited to pokerboard.'\nPOKERBOARD_EXIST = 'Already Exist.'\nPOKERBOARD_MEMBER = 'Account does not exist.'\nUSER_NEED_SIGNUP = 'SignUp'\nDATA_NOT_DECRPTED = 'Your email is not encrypted with project key.'\n","repo_name":"jay-2001/PokerPlanner","sub_path":"BE-PokerPlanner/poker_board/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"19179190714","text":"from pytube import YouTube\n\n# Ask for the YouTube video URL\nurl = input(\"Enter the YouTube video URL: \")\n\n# Create a YouTube object with the video URL\nvideo = YouTube(url)\n\n# Ask the user whether to download video or audio\nchoice = input(\"Do you want to download the video or the audio? Enter V for video, A for audio: \")\n\n# Get the streams for the selected option\nif choice.lower() == 'v':\n # Get all the available video streams\n streams = video.streams.filter(progressive=True)\n # Print out the available quality options\n for i in range(len(streams)):\n print(f\"{i+1}. Resolution: {streams[i].resolution}, Format: {streams[i].mime_type}\")\nelse:\n # Get all the available audio streams\n streams = video.streams.filter(only_audio=True)\n # Print out the available quality options\n for i in range(len(streams)):\n print(f\"{i+1}. Bitrate: {streams[i].abr}, Format: {streams[i].mime_type}\")\n\n# Ask the user to select a quality option\nselection = int(input(\"Enter the number of the preferred option: \"))\n\n# Get the stream for the user-selected quality option\nstream = streams[selection - 1]\n\n# Ask the user for the file name to save the stream as\nfilename = input(\"Enter the file name to save the stream as (without extension): \")\n\n# Download the stream to the current directory with the user-specified file name\nstream.download(output_path='.', filename_prefix='Wessam_', filename=filename)\n","repo_name":"Wessam-K/Python","sub_path":"Youtube download.py","file_name":"Youtube download.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"18116323385","text":"class SeagullParser:\r\n\r\n ERR_TOKEN_MISMATCH = 1\r\n ERR_GET_SYMB = 2\r\n ERR_UNEXP_END_OF_PROG = 3\r\n ERR_INSTR_MISMATCH = 4\r\n ERR_EXP_FACTOR_MISMATCH = 5\r\n ERR_BOOL_EXPR_MISMATCH = 6\r\n\r\n def __init__(back, table_of_tokens: dict[int, tuple], const_table):\r\n back.tableOfForHiddenId = {}\r\n back.table_of_tokens = table_of_tokens\r\n back.num_row = 1\r\n back.token_count = len(table_of_tokens)\r\n back.postfix_notation = []\r\n back.tableOfLabel = {}\r\n back.f_success = True\r\n back.hidden_table = {}\r\n back.const_table = const_table\r\n\r\n def parse_token(back, lexeme, token, ident):\r\n if back.num_row > back.token_count+1:\r\n back.fail_parse(back.ERR_UNEXP_END_OF_PROG, (lexeme, token, back.num_row))\r\n\r\n line_num, lex, tok = back.get_symb()\r\n back.num_row += 1\r\n\r\n if (lex, tok) == (lexeme, token):\r\n print(ident + 'parseToken: В рядку {0} токен {1}'.format(line_num, (lexeme, token)))\r\n return True\r\n else:\r\n back.fail_parse(back.ERR_TOKEN_MISMATCH, (line_num, lex, tok, lexeme, token))\r\n return False\r\n\r\n def get_symb(back):\r\n return back.get_row(back.num_row)\r\n\r\n def parse_statement_list(back):\r\n print('\\t parseStatementList():')\r\n if back.parse_statement():\r\n back.parse_statement_list()\r\n return True\r\n\r\n def parse_statement(back):\r\n print('\\t\\t parseStatement():')\r\n num_line, lex, tok = back.get_symb()\r\n if tok == 'ident':\r\n back.postfix_notation.append((lex, tok, None))\r\n back.num_row += 1\r\n if back.get_symb()[-1] == 'assign_op':\r\n back.get_back()\r\n back.parse_assign()\r\n else:\r\n back.get_back()\r\n back.parse_expression()\r\n back.parse_token(';', 'op_end', '\\t' * 2)\r\n return True\r\n\r\n elif (lex, tok) == ('if', 'keyword'):\r\n back.parse_if()\r\n return True\r\n\r\n elif (lex, tok) == ('for', 'keyword'):\r\n back.parse_for()\r\n return True\r\n\r\n elif (lex, tok) == ('out', 'keyword'):\r\n back.parse_print()\r\n back.parse_token(';', 'op_end', '\\t' * 2)\r\n return True\r\n elif (lex, tok) == ('scan', 'keyword'):\r\n back.parse_scan()\r\n back.parse_token(';', 'op_end', '\\t' * 2)\r\n return True\r\n elif lex in ('integer', 'real', 'boolean') and tok == 'keyword':\r\n back.parse_declaration()\r\n back.parse_token(';', 'op_end', '\\t' * 2)\r\n return True\r\n elif (lex, tok) == ('}', 'end_block'):\r\n return False\r\n else:\r\n return False\r\n\r\n def fail_parse(back, error_code, what: tuple):\r\n back.f_success = False\r\n if error_code == back.ERR_UNEXP_END_OF_PROG:\r\n (lexeme, token, num_row) = what\r\n print(\r\n 'SeagullParser ERROR: \\n\\t Неочікуваний кінець програми - в таблиці символів (розбору) немає запису з '\r\n 'номером {1}. \\n\\t Очікувалось - {0}'.format(\r\n (lexeme, token), num_row))\r\n if error_code == back.ERR_GET_SYMB:\r\n (num_row) = what\r\n print(\r\n 'SeagullParser ERROR: \\n\\t Неочікуваний кінець програми - в таблиці символів (розбору) немає запису з '\r\n 'номером {0}. \\n\\t Останній запис - {1}'.format(\r\n num_row, back.table_of_tokens[num_row - 1]))\r\n elif error_code == back.ERR_TOKEN_MISMATCH:\r\n (num_line, lexeme, token, lex, tok) = what\r\n print('SeagullParser ERROR: \\n\\t В рядку {0} неочікуваний елемент ({1},{2}). \\n\\t Очікувався - ({3},{4}).'.format(\r\n num_line, lexeme, token, lex, tok))\r\n elif error_code == back.ERR_INSTR_MISMATCH:\r\n (num_line, lex, tok, expected) = what\r\n print(\r\n 'SeagullParser ERROR: \\n\\t В рядку {0} неочікуваний елемент ({1},{2}). 
\\n\\t Очікувався - {3}.'.format(num_line,\r\n lex, tok,\r\n expected))\r\n elif error_code == back.ERR_EXP_FACTOR_MISMATCH:\r\n (num_line, lex, tok, expected) = what\r\n print(\r\n 'SeagullParser ERROR: \\n\\t В рядку {0} неочікуваний елемент ({1},{2}). \\n\\t Очікувався - {3}.'.format(num_line,\r\n lex, tok,\r\n expected))\r\n elif error_code == back.ERR_BOOL_EXPR_MISMATCH:\r\n (num_line, lex, tok, expected) = what\r\n print(\r\n 'SeagullParser ERROR: \\n\\t В рядку {0} неочікуваний елемент ({1},{2}). \\n\\t Очікувався - {3}.'.format(num_line,\r\n lex, tok,\r\n expected))\r\n\r\n exit(error_code)\r\n\r\n def parse_assign(back):\r\n print('\\t' * 4 + 'parseAssign():')\r\n num_line, lex, tok = back.get_symb()\r\n\r\n back.num_row += 1\r\n\r\n print('\\t' * 5 + 'в рядку {0} - {1}'.format(num_line, (lex, tok)))\r\n if back.parse_token('=', 'assign_op', '\\t' * 5):\r\n back.parse_expression()\r\n back.postfix_notation.append(('=', 'assign_op', None))\r\n return True\r\n else:\r\n return False\r\n\r\n def parse_expression(back):\r\n print('\\t' * 5 + 'parseExpression():')\r\n num_row, lex, tok = back.get_symb()\r\n arithm_expr_parse_result = back.parse_arithm_expression()\r\n bool_expr_parse_result = back.parse_bool_expr()\r\n if arithm_expr_parse_result == False and bool_expr_parse_result == False:\r\n num_line, lex, tok = back.get_symb()\r\n back.fail_parse(back.ERR_EXP_FACTOR_MISMATCH,\r\n (num_line, lex, tok, 'boolean, bool_op, rel_op, integer, real, ident або \\'(\\' Expression '\r\n '\\')\\''))\r\n return True\r\n\r\n def parse_power(back):\r\n print('\\t' * 6 + 'parsePower():')\r\n back.parse_factor()\r\n num_line, lex, tok = back.get_symb()\r\n if tok == 'pow_op':\r\n back.num_row += 1\r\n back.parse_power()\r\n back.postfix_notation.append((lex, tok, None))\r\n return True\r\n\r\n def parse_term(back):\r\n print('\\t' * 6 + 'parseTerm():')\r\n if back.parse_power():\r\n numLine, lex, tok = back.get_symb()\r\n if tok == 'mult_op':\r\n back.num_row += 1\r\n print('\\t' * 6 + 'в рядку {0} - {1}'.format(numLine, (lex, tok)))\r\n back.parse_term()\r\n back.postfix_notation.append((lex, tok, None))\r\n if (lex, tok) == ('-', 'unar_minus'):\r\n back.num_row += 1\r\n print('\\t' * 6 + 'в рядку {0} - {1}'.format(numLine, (lex, tok)))\r\n back.parse_term()\r\n else:\r\n F = False\r\n return True\r\n else:\r\n return False\r\n\r\n def parse_factor(back):\r\n print('\\t' * 7 + 'parseFactor():')\r\n num_line, lex, tok = back.get_symb()\r\n print('\\t' * 7 + 'parseFactor():=============рядок: {0}\\t (lex, tok):{1}'.format(num_line, (lex, tok)))\r\n\r\n if tok in ('integer', 'real', 'ident', 'boolean'):\r\n back.postfix_notation.append((lex, tok, None))\r\n back.num_row += 1\r\n print('\\t' * 7 + 'в рядку {0} - {1}'.format(num_line, (lex, tok)))\r\n\r\n elif lex == '(':\r\n back.num_row += 1\r\n back.parse_arithm_expression()\r\n back.parse_token(')', 'par_op', '\\t' * 7)\r\n print('\\t' * 7 + 'в рядку {0} - {1}'.format(num_line, (lex, tok)))\r\n else:\r\n return False\r\n return True\r\n\r\n def parse_if(back):\r\n _, lex, tok = back.get_symb()\r\n if lex == 'if' and tok == 'keyword':\r\n back.num_row += 1\r\n back.parse_bool_expr()\r\n back.parse_token('{', 'start_block', '\\t' * 5)\r\n back.parse_statement_list()\r\n back.postfix_notation.append((':', 'colon'))\r\n back.parse_token('}', 'end_block', '\\t' * 5)\r\n return True\r\n else:\r\n return False\r\n\r\n def parse_bool_expr(back):\r\n num_line, lex, tok = back.get_symb()\r\n print('\\t' * 6 + 'parse_bool_expr: ' + 'в рядку {0} - 
{1}'.format(num_line, (lex, tok)))\r\n if tok == 'boolean':\r\n back.num_row += 1\r\n back.parse_bool_expr()\r\n back.postfix_notation.append((lex, tok, None))\r\n elif tok == 'ident':\r\n back.num_row += 1\r\n back.parse_bool_expr()\r\n back.postfix_notation.append((lex, tok, None))\r\n elif tok == 'rel_op':\r\n back.num_row += 1\r\n back.parse_arithm_expression()\r\n back.parse_bool_expr()\r\n back.postfix_notation.append((lex, tok, None))\r\n return True\r\n elif tok == 'bool_op':\r\n back.num_row += 1\r\n back.parse_bool_expr()\r\n back.postfix_notation.append((lex, tok, None))\r\n return True\r\n else:\r\n return False\r\n\r\n def parse_program(back):\r\n try:\r\n back.parse_token('program', 'keyword', '')\r\n num_line, lex, tok = back.get_symb()\r\n if (tok) == ('ident'):\r\n back.num_row += 1\r\n else:\r\n print('SeagullParser: Немає імені програми після ключового слова `program`'); return False\r\n back.parse_token('{', 'start_block', '')\r\n back.parse_statement_list()\r\n back.parse_token('}', 'end_block', '')\r\n print('SeagullParser: Синтаксичний аналіз завершився успішно')\r\n return True\r\n except SystemExit as e:\r\n print('SeagullParser: Аварійне завершення програми з кодом {0}'.format(e))\r\n\r\n def parse_for(back):\r\n num_line, lex, tok = back.get_symb()\r\n if (lex, tok) == ('for', 'keyword'):\r\n back.num_row += 1\r\n back.parse_assign()\r\n back.postfix_notation.append((lex, tok))\r\n\r\n back.parse_token('by', 'keyword', '')\r\n back.postfix_notation.append(('=', 'assign_op'))\r\n back.postfix_notation.append((':', 'colon'))\r\n back.parse_expression()\r\n back.parse_token('to', 'keyword', '')\r\n back.postfix_notation.append(('=', 'assign_op'))\r\n back.postfix_notation.append(('0', 'integer'))\r\n back.postfix_notation.append(('==', 'rel_op'))\r\n back.postfix_notation.append((lex, tok))\r\n back.postfix_notation.append((lex, tok))\r\n back.postfix_notation.append(('+', 'add_op'))\r\n back.postfix_notation.append(('=', 'assign_op'))\r\n back.postfix_notation.append((':', 'colon'))\r\n back.postfix_notation.append(('0', 'intnum'))\r\n back.postfix_notation.append(('=', 'assign_op'))\r\n back.postfix_notation.append((lex, tok))\r\n back.parse_expression()\r\n back.parse_token('do', 'keyword', '\\t' * 5)\r\n\r\n back.postfix_notation.append(('-', 'add_op'))\r\n back.postfix_notation.append(('*', 'mult_op'))\r\n back.postfix_notation.append(('0', 'intnum'))\r\n back.postfix_notation.append(('<', 'rel_op'))\r\n\r\n back.parse_statement()\r\n\r\n back.parse_token('rof', 'keyword', '')\r\n\r\n back.postfix_notation.append((':', 'colon'))\r\n\r\n return True\r\n else:\r\n return False\r\n\r\n def parse_print(back):\r\n _, lex, tok = back.get_symb()\r\n if (lex, tok) == ('out', 'keyword'):\r\n back.num_row += 1\r\n back.parse_token('(', 'par_op', '\\t' * 5)\r\n back.parse_factor()\r\n back.parse_inner_print()\r\n back.parse_token(')', 'par_op', '\\t' * 5)\r\n back.postfix_notation.append((lex, tok))\r\n return True\r\n else:\r\n return False\r\n\r\n def parse_inner_print(back):\r\n _, lex, tok = back.get_symb()\r\n if (lex, tok) == (',', 'punct'):\r\n back.num_row += 1\r\n back.parse_factor()\r\n back.parse_inner_print()\r\n return True\r\n else:\r\n return False\r\n\r\n def parse_scan(back):\r\n _, lex, tok = back.get_symb()\r\n if (lex, tok) == ('scan', 'keyword'):\r\n back.num_row += 1\r\n back.parse_token('(', 'par_op', '\\t' * 5)\r\n back.parse_var_list()\r\n back.parse_token(')', 'par_op', '\\t' * 5)\r\n back.postfix_notation.append((lex, tok))\r\n return True\r\n 
else:\r\n return False\r\n\r\n def parse_declaration(back):\r\n num_line, lex, tok = back.get_symb()\r\n back.num_row += 1\r\n if lex in ('integer', 'real', 'boolean') and tok == 'keyword':\r\n back.parse_var_init_list(lex)\r\n\r\n else:\r\n back.fail_parse(back.ERR_EXP_FACTOR_MISMATCH,\r\n (num_line, lex, tok, 'integer, real, boolean, ident або \\'(\\' Expression \\')\\''))\r\n\r\n def parse_arithm_expression(back):\r\n if back.parse_term():\r\n num_line, lex, tok = back.get_symb()\r\n if tok in 'add_op':\r\n back.num_row += 1\r\n print('\\t' * 6 + 'parse_arithm_expression: ' + f'в рядку {num_line} - {(lex, tok)}')\r\n back.parse_arithm_expression()\r\n back.postfix_notation.append((lex, tok, None))\r\n return True\r\n else:\r\n return False\r\n\r\n def parse_var_init_list(back, var_type):\r\n num_line, lex, tok = back.get_symb()\r\n back.num_row += 1\r\n\r\n if tok == 'ident':\r\n print('\\t' * 5 + f'в рядку {num_line} - {(lex, tok)}')\r\n back.postfix_notation.append((lex, tok, var_type))\r\n pass\r\n else:\r\n back.fail_parse(back.ERR_EXP_FACTOR_MISMATCH,\r\n (num_line, lex, tok, 'bracket_op, int, float, bool, ident або \\'(\\' Expression \\')\\''))\r\n num_line, lex, tok = back.get_symb()\r\n if lex == '=' and tok == 'assign_op':\r\n back.get_back()\r\n back.parse_assign()\r\n\r\n num_line, lex, tok = back.get_symb()\r\n if lex == ',' and tok == 'punct':\r\n back.num_row += 1\r\n back.parse_var_init_list(var_type)\r\n\r\n def get_row(back, index):\r\n if index > back.token_count:\r\n back.fail_parse(back.ERR_GET_SYMB, (index,))\r\n num_line, lexeme, token, _ = back.table_of_tokens[index]\r\n return num_line, lexeme, token\r\n\r\n def parse_var_list(back):\r\n num_line, lex, tok = back.get_symb()\r\n back.num_row += 1\r\n\r\n if tok == 'ident':\r\n print('\\t' * 5 + f'в рядку {num_line} - {(lex, tok)}')\r\n back.postfix_notation.append((lex, tok, ''))\r\n pass\r\n else:\r\n back.fail_parse(back.ERR_EXP_FACTOR_MISMATCH,\r\n (num_line, lex, tok, 'bracket_op, int, float, bool, ident або \\'(\\' Expression \\')\\''))\r\n\r\n num_line, lex, tok = back.get_symb()\r\n if lex == ',' and tok == 'punct':\r\n back.num_row += 1\r\n back.parse_var_list()\r\n\r\n def get_back(back):\r\n back.num_row -= 1","repo_name":"VovaChaika/seagullLanguage","sub_path":"Seagull_parser.py","file_name":"Seagull_parser.py","file_ext":"py","file_size_in_byte":16369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"7547805567","text":"# -*- coding: utf-8 -*-\n\n\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import ValidationError\n\n\nclass CrossoveredBudgetLinesSections(models.Model):\n _name = \"crossovered.budget.lines.sections\"\n _description = \"Budget Line Sections\"\n\n name = fields.Char(string=\"Description\")\n active = fields.Boolean(string=\"Active\", default=True)\n company_id = fields.Many2one('res.company', string=\"Company\", default=lambda self:self.env.user.company_id.id)\n\nclass CrossoveredBudgetLines(models.Model):\n _inherit = \"crossovered.budget.lines\"\n\n cap = fields.Float(related=\"analytic_account_id.cap\",string=\"Cap\")\n total_amount = fields.Float(string='Total Amount', compute='_compute_total_amount')\n correlated_amount = fields.Float(string=\"Encumbrance Amount\", copy=False)\n correlation_percentage = fields.Float(string='Encumbrance Percentage(%)', compute='_compute_correlation_percentage')\n consumed_amount = fields.Float(string='Consumed Amount')\n remaining_amount = fields.Float(string='Remaining amount', compute='_compute_remaining_amount')\n remaining_without_correlation = fields.Float(string='Available For Encumbrance', compute='_compute_available_for_correlation')\n remaining_without_correlation_perc = fields.Float(string='Available (%)', compute='_compute_available_for_correlation')\n section_id = fields.Many2one('crossovered.budget.lines.sections', string='Section Name')\n state = fields.Selection([\n ('draft', 'Draft'),\n ('cancel', 'Cancelled'),\n ('confirm', 'Confirmed'),\n ('validate', 'Validated'),\n ('done', 'Done')\n ], 'Status', default='draft', related='crossovered_budget_id.state')\n\n \n\n @api.depends('planned_amount')\n def _compute_total_amount(self):\n for rec in self:\n rec.total_amount = rec.planned_amount\n\n @api.depends('total_amount', 'correlated_amount')\n def _compute_correlation_percentage(self):\n for rec in self:\n rec.correlation_percentage = rec.correlated_amount and rec.total_amount and (rec.correlated_amount/rec.total_amount)*100.0\n\n @api.depends('consumed_amount', 'correlated_amount')\n def _compute_remaining_amount(self):\n for rec in self:\n rec.remaining_amount = rec.correlated_amount - rec.consumed_amount\n\n @api.depends('total_amount', 'correlated_amount')\n def _compute_available_for_correlation(self):\n for rec in self:\n rec.remaining_without_correlation = rec.total_amount - rec.correlated_amount\n rec.remaining_without_correlation_perc = rec.total_amount and ((rec.total_amount - rec.correlated_amount) / rec.total_amount) *100\n\n def check_cap(self):\n total_allocated_amount = 0.00\n for line_rec in self.search([('analytic_account_id', '=', self.analytic_account_id.id)]):\n total_allocated_amount += line_rec.total_amount\n if total_allocated_amount > self.cap:\n raise ValidationError(_(\"You have exceed cap amount for %s project.\" % self.analytic_account_id.name))\n return True\n\n @api.model\n def create(self, vals):\n rec = super(CrossoveredBudgetLines, self).create(vals)\n rec.check_cap()\n return rec\n \n @api.model\n def write(self, vals):\n rec = super(CrossoveredBudgetLines, self).write(vals)\n for rec in self:\n rec.check_cap()\n return rec \n\nclass AccountAnalyticAccount(models.Model):\n _inherit = \"account.analytic.account\"\n\n cap = fields.Float(\"Cap\")\n\nclass CrossoveredBudget(models.Model):\n _inherit = \"crossovered.budget\"\n\n\n @api.one\n 
@api.constrains('crossovered_budget_line','crossovered_budget_line.analytic_account_id','crossovered_budget_line.section_id','crossovered_budget_line.general_budget_id','crossovered_budget_line.date_from','crossovered_budget_line.date_to')\n def check_time_elapse(self):\n count = 0\n for line in self.crossovered_budget_line:\n count += 1\n analytic_account = line.analytic_account_id\n section = line.section_id\n budget_position = line.general_budget_id\n date_from = line.date_from\n date_to = line.date_to\n for rest in self.crossovered_budget_line[count:]:\n if analytic_account.id == rest.analytic_account_id.id and section.id == rest.section_id.id and budget_position.id == rest.general_budget_id.id and ((date_from >= rest.date_from and date_from <= rest.date_to) or (date_to >= rest.date_from and date_to <= rest.date_to)): \n raise ValidationError(_(\"There is time elapse !!\"))\n","repo_name":"slnee-it/SLNEE-MASTER","sub_path":"slnee_budget/models/budget.py","file_name":"budget.py","file_ext":"py","file_size_in_byte":4633,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"}
+{"seq_id":"33005508360","text":"# Perform intersection operation in 2 array\r\nlist1 = [1,23,7,89,231,45,78,11]\r\nlist2 = [2,78,23,111,56,78,0,9]\r\nlist3=[]\r\nprint(\"List 1 : \",list1)\r\nprint(\"List 2 : \",list2)\r\n\r\nlength=len(list1)+len(list2)\r\nfor i in list1:\r\n for j in list2:\r\n if i==j:\r\n list3.extend([i])\r\nprint(\"Intersection of list 1 and list 2 : \",list3)","repo_name":"Kashishkd77/Array-using-List-in-Python","sub_path":"Intersection.py","file_name":"Intersection.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"20872343838","text":"import random\nimport bokeh\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom Piloto import Piloto\nfrom Constructor import Constructor\nfrom Bebida import Bebida\nfrom Carrera import Carrera\nfrom Circuito import Circuito\nfrom Comida import Comida\nfrom Producto import Producto\nfrom Restaurante import Restaurante\nfrom Ubicacion import Ubicacion\nfrom Cliente import Cliente\nfrom Ticket import Ticket\nfrom Compra import Compra\n\n\"\"\"Convertir las estructuras de datos en objetos\"\"\"\ndef carrera_objects(edd, circuitos):\n carreras = []\n contador = 0\n for carrera in edd:\n mapa = crear_mapa()\n circ = circuitos[contador]\n rest = []\n for res in carrera['restaurants']:\n rest.append(res['name'])\n x = Carrera(carrera['round'],carrera['name'],circ, carrera['date'], rest,False, mapa, 0,0)\n carreras.append(x)\n contador+=1\n return carreras\n\ndef circuito_obj(edd):\n circuitos=[]\n for carrera in edd:\n loc = Ubicacion(carrera['circuit']['location']['lat'],carrera['circuit']['location']['long'],carrera['circuit']['location']['locality'],carrera['circuit']['location']['country'])\n x = Circuito(carrera['circuit']['circuitId'], carrera['circuit']['name'],loc)\n circuitos.append(x)\n return circuitos\n\ndef restaurantes_obj(edd):\n restaurantes = []\n productos = []\n for carrera in edd:\n for restaur in carrera['restaurants']:\n items = restaur['items']\n prod = []\n for i in items: \n # prod = []\n subtotal = i['price']\n subtotal = float(subtotal)\n subtotal = int(subtotal)\n prod_type = i['type'].split(':')\n if prod_type[0] == 'drink':\n if prod_type[1] == 'alcoholic':\n y = Bebida(i['name'],300, subtotal, round(subtotal*0.16,2) ,round(subtotal*1.16,2), True)\n prod.append(y)\n productos.append(y)\n else:\n y = Bebida(i['name'],300, subtotal, round(subtotal*0.16,2),round(subtotal*1.16,2), False)\n prod.append(y)\n productos.append(y)\n if prod_type[0] == 'food':\n if prod_type[1] == 'fast':\n y =Comida(i['name'],300, subtotal, round(subtotal*0.16,2),round(subtotal*1.16,2),'Comida de empaque')\n prod.append(y)\n productos.append(y)\n elif prod_type[1] == 'restaurant':\n y =Comida(i['name'],300, subtotal, round(subtotal*0.16,2),round(subtotal*1.16,2),'Comida de preparacion')\n prod.append(y)\n productos.append(y)\n x = Restaurante(restaur['name'],prod, carrera['name'])\n restaurantes.append(x)\n return restaurantes, productos\n\ndef constructores_objetos(edd,pilotos):\n puntaje = 0\n pilots, constructores = [] , []\n for builder in edd:\n pilots = []\n for piloto in pilotos:\n if piloto.team == builder['id']:\n pilots.append(piloto)\n x = Constructor(builder['id'],builder['name'],builder['nationality'],pilots, puntaje)\n constructores.append(x)\n return constructores\n \ndef pilotos_objetos(edd):\n puntaje = 0\n pilotos = []\n for pilot in edd:\n x = Piloto(pilot['id'], pilot['permanentNumber'],pilot['code'],pilot['team'],pilot['firstName'],pilot['lastName'],pilot['dateOfBirth'],pilot['nationality'],puntaje)\n pilotos.append(x)\n return pilotos\n \n'''Seleccionar opcion''' \ndef get_option():\n print('\\n🏎️ Bienvenido al programa de Formula 1 🏎️')\n while True:\n try:\n opt = int(input('\\nPresione 1 para ingresar a la gestion de carreras y equipos\\nPresione 2 para ingresar a la gestion de venta de entradas\\nPresione 3 para ingresar a la gestion de asistencia a las carreras\\nPresione 4 para ingresar a la gestion de restaurantes\\nPresione 5 para ingresar a la gestion de venta de restaurantes\\nPresione 6 para visualizar las 
estadisticas\\nPresione 7 para salir\\n\\nIndique el numero correspondiente a su eleccion: '))\n if opt not in range (1,8):\n raise Exception\n break\n except:\n print('\\nERROR - Por favor ingrese una opcion valida')\n return opt\n\n'''Gestion 1: Carreras y equipos''' \n'''Busqueda de constructores por paises'''\ndef filter_paises(constructores):\n print('\\nPaises disponibles:')\n paises = {'Alemania':'German','Austria':'Austrian','Francia':'French','Inglaterra':'British','Italia':'Italian','Suiza':'Swiss','Usa':'American'} \n for i, k in enumerate(paises.keys()):\n print('\\t',i+1,k)\n pais = input('Ingrese el nombre del pais del cual desea ver sus constructores:\\n>> ').title()\n while pais not in paises.keys():\n pais = input('\\nERROR - Opcion Invalida\\nIngrese el nombre del pais del cual desea ver sus constructores:\\n>> ').title()\n print(f\"\\nConstructores de {pais}\")\n for constructor in constructores:\n if constructor.nacionalidad == paises[pais]:\n print('\\n\\t🏁',constructor.nombre, '\\n\\tPuntaje: ',constructor.score)\n\n'''Ver pilotos de cada constructor'''\ndef filter_pilots(constructores):\n print('\\nConstructores disponibles:')\n for i, c in enumerate(constructores):\n print('\\t',i+1,c.nombre)\n choice = input('Ingrese el numero correspondiente al constructor cuyos pilotos desea visualizar:\\n>> ')\n while not choice.isnumeric() or int(choice) not in range(1,len(constructores)+1):\n choice = input('\\nERROR - Ingreso invalido\\nIngrese el numero correspondiente al constructor cuyos pilotos desea visualizar:\\n>> ')\n print(f\"\\nPilotos de {constructores[int(choice)-1].nombre}\")\n for p in constructores[int(choice)-1].pilotos:\n print('🚘',end=' ')\n p.mostrar()\n print('\\n')\n \n'''Buscar carreras por pais del circuito''' \ndef filter_carreras(carreras,circuitos):\n paises = []\n print('\\nPaises de los circuitos:')\n for c in circuitos:\n if c.location.pais == 'USA':\n c.location.pais = 'United States'\n if c.location.pais not in paises:\n paises.append(c.location.pais)\n for i, p in enumerate(paises):\n print('\\t',i+1,p)\n choice = input('Ingrese el nombre del pais cuyas carreras desea visualizar:\\n>> ').title()\n while choice not in paises and choice != 'Uk' and choice!= 'Uae':\n choice = input('\\nERROR - Ingreso invalido\\nIngrese el nombre del pais cuyas carreras desea visualizar\\n>> ').title()\n if choice == 'Uae' or choice == 'Uk':\n choice = choice.upper()\n print(f\"\\n----- CARRERAS EN {choice} -----\")\n for carrera in carreras:\n if carrera.circuito.location.pais == choice:\n print('***',end='')\n carrera.mostrar()\n \n'''Buscar carreras por mes'''\ndef filter_months(carreras):\n meses = {'enero':'01','febrero':'02','marzo':'03','abril':'04','mayo':'05','junio':'06','julio':'07','agosto':'08','septiembre':'09','octubre':'10','noviembre':'11','diciembre':'12'}\n mes = input('Ingrese el mes cuyas carreras desea observar:\\n>> ').lower()\n while mes not in meses.keys():\n mes = input('\\nERROR - Ingreso Invalido!\\nIngrese el mes cuyas carreras desea observar:\\n>> ').lower()\n contador = 0\n print(f\"Carreras durante el mes de {mes}\")\n for carrera in carreras:\n month = carrera.get_month()\n if month == meses[mes]:\n carrera.mostrar()\n contador += 1\n if contador == 0:\n print(f\"\\nNo hay carreras durante el mes de {mes}\")\n\n'''Funcion para obtener el podio de una carrera'''\ndef get_podio(pilotos, carreras, constructores):\n puntaje = {1:25,2:18,3:15,4:12,5:10,6:8,7:6,8:4,9:2,10:1}\n print('\\nCarreras sin finalizar:')\n n_invalidos = []\n for 
i,carrera in enumerate(carreras):\n if carrera.podio == False:\n print('\\t',i+1,carrera.nombre)\n else:\n n_invalidos.append(i+1)\n if len(n_invalidos) != len(carreras):\n choice = input('\\nIngrese el numero correspondiente a la carrera a finalizar:\\n>> ')\n while not choice.isnumeric() or int(choice) not in range (1,len(carreras)+1) or int(choice) in n_invalidos:\n choice = input('\\nERROR - Opcion invalida\\nPor favor ingrese el numero correspondiente a la carrera a finalizar:\\n>> ')\n podio = random.sample(pilotos, 10) \n contador = 1\n for piloto in podio:\n piloto.score += puntaje[contador]\n contador += 1\n for constructor in constructores:\n for pilot in constructor.pilotos:\n if pilot in podio:\n constructor.score += pilot.score\n carreras[int(choice)-1].podio = podio\n print(f\"\\nPodio final para la carrera {carreras[int(choice)-1].nombre}\")\n l=[]\n for i, piloto in enumerate(podio):\n pi = piloto.firstName +' '+ piloto.lastName\n sc= puntaje[i+1]\n d= [i+1 ,pi, sc]\n l.append(d)\n print(tabulate(l,headers=['Posicion','Piloto','Puntaje']))\n return pilotos, constructores, carreras\n else:\n print('\\nYa se han finalizado todas las carreras!') \n return pilotos, constructores, carreras\n\n'''Ver campeon mundial para el momento'''\ndef ganadores(pilotos, constructores):\n aux_pilotos = sorted(pilotos, key=lambda x: x.score, reverse=True)\n aux_constructores = sorted(constructores, key=lambda x: x.score,reverse=True)\n if aux_pilotos[0].score == 0:\n print('\\nAun no se ha finalizado ninguna carrera!')\n else:\n print(f'Campeon mundial:\\n\\tPiloto: {aux_pilotos[0].firstName} {aux_pilotos[0].lastName} con {aux_pilotos[0].score} puntos\\n\\tConstructor: {aux_constructores[0].nombre} con {aux_constructores[0].score} puntos')\n\n'''Gestion 2: Venta de entradas'''\n'''Obtener los datos del cliente y comprar entradas'''\ndef get_client_data(clientes,carreras, codigos):\n print('\\nPor favor ingrese los datos solicitados a continuacion para poder comprar su entrada!')\n nombre = input('Nombre completo: ')\n identificacion = input('Numero de identificacion (sin puntos!): ')\n while not identificacion.isnumeric() or identificacion in codigos:\n if not identificacion.isnumeric():\n print('\\nERROR - Ingreso Invalido - Por favor ingrese su identificacion sin puntos')\n if identificacion in codigos:\n print('\\nERROR - Ingreso Invalido - Ya exite un existe un ticket con la cedula ingresada')\n identificacion = input('\\nPor favor ingrese su numero de identificacion sin puntos: ')\n edad = input('Edad: ')\n while not edad.isnumeric() or int(edad) not in range(1,101):\n edad = input('\\nERROR - Ingreso Invalido\\nPor favor ingrese su edad reflejada en un numero entre el 0 y 100: ')\n print('\\nCircuitos disponibles:')\n for i, carrera in enumerate(carreras):\n print('\\t',i+1, carrera.nombre)\n circuito = input('Ingrese el numero correspondiente al circuito al cual desee asistir: ')\n while not circuito.isnumeric() or int(circuito) not in range(1,len(carreras)+1):\n circuito = input(f\"\\nERROR - Ingreso Invalido\\nPor favor ingrese un numero entre 1 y {len(carreras)+1} correspondiente al circuito a asistir: \")\n circuito = int(circuito)-1\n entradas = {'VIP': 340,'General':150}\n tipo_entrada = input('\\nTipo de entrada: \\n\\tPresione 1 para VIP\\n\\tPresione 2 para general\\n>> ')\n while not tipo_entrada.isnumeric() or int(tipo_entrada) not in range(1,3):\n tipo_entrada = input('\\nERROR - Ingreso Invalido\\nPresione 1 para VIP o presione 2 para general: ')\n if tipo_entrada == '1':\n 
tipo_entrada = 'VIP'\n else: \n tipo_entrada ='General'\n c = carreras[circuito]\n mapa_aux = c.mapa\n disponibles = 100-c.boletos_vendidos\n cantidad = input(f\"Cuantas entradas de tipo {tipo_entrada} desea? Solo hay {disponibles} entradas disponibles: \")\n while not cantidad.isnumeric() or int(cantidad) not in range(1,disponibles+1):\n cantidad = input(f\"\\nERROR - Ingreso Invalido\\nCuantas entradas de tipo {tipo_entrada} desea? Recuerde que solo hay {disponibles} entrads disponibles: \")\n cantidad = int(cantidad)\n asientos = get_asientos(cantidad, mapa_aux) \n subtotal = entradas[tipo_entrada] * cantidad\n descuento, disc = 0, False\n if num_ondulado(int(identificacion)) == True:\n disc = True\n descuento = subtotal*0.5\n iva = subtotal*0.16\n precio = subtotal+iva-descuento\n print(f\"\\n-----COMPRA DE {nombre.upper()}-----\\nCarrera: {carreras[circuito].nombre}\\nEntradas de tipo {tipo_entrada}: {cantidad}\\nAsientos: {asientos}\\nSubtotal: {subtotal}\\nDescuento: {descuento}\\nIVA: {iva}\\nMonto total: {precio}$\")\n if disc:\n print('Se ha aplicado un descuento del 50%')\n if input('\\nPresione cualquier tecla para confirmar orden o presione \"X\" para cancelar: ').title() != 'X':\n y = Ticket(tipo_entrada, cantidad, asientos, precio)\n x = Cliente(nombre,identificacion,edad,c.nombre,y, descuento)\n clientes.append(x)\n codigos.append(x.identificacion)\n # for asiento in asientos:\n # for fila,columna in asiento.items():\n # for carrera in carreras:\n # if carrera != c:\n # carrera.mapa[int(fila)-1][int(columna)-1]= False\n print('\\nSus tickets se han comprado con exito!')\n c.boletos_vendidos += y.cantidad\n c.mapa = mapa_aux\n return clientes, carreras, codigos\n else:\n for asiento in asientos:\n for fila,columna in asiento.items():\n c.mapa[int(fila)-1][int(columna)-1]= False\n return None\n\ndef crear_mapa(filas=10,columnas=10):\n mapa = []\n for y in range(filas):\n aux = []\n for x in range(columnas):\n aux.append(False)\n mapa.append(aux)\n return mapa \n\ndef imprimir_mapa(mapa):\n print('*'*len(mapa[1]) + 'ASIENTOS DISPONIBLES'+'*'*len(mapa[1]))\n print('\\n')\n nums = ' '\n for i, x in enumerate(mapa[1]):\n if i > 8 :\n nums += str(i+1)+'|'\n else:\n nums += str(i+1)+'| '\n print(nums)\n for i, x in enumerate(mapa):\n if i>8:\n auxiliar= str(i+1)\n else:\n auxiliar= str(i+1)+\" \"\n for y in x:\n if y ==True:\n auxiliar+=\"| X \"\n else:\n auxiliar+=\"| \"\n print(\" \"+\"-\"*len(mapa[1]*4))\n print(auxiliar)\n \ndef get_asientos(cantidad,mapa_aux):\n # r = carrera\n asientos = []\n contador =1\n imprimir_mapa(mapa_aux)\n while cantidad >= contador:\n while True:\n try: \n asiento = {}\n fila = input(f\"Seleccione la fila de su entrada numero {contador}: \")\n while not fila.isnumeric() or int(fila) not in range (1,11):\n fila = input(f\"\\nERROR - Ingreso Invalido\\nSeleccione la fila de su entrada numero {contador}: \")\n columna = input(f\"Seleccione la columna de su entrada numero {contador}: \")\n while not columna.isnumeric() or int(columna) not in range (1,11):\n columna = input(f\"\\nERROR - Ingreso Invalido\\nSeleccione la columna de su entrada numero {contador}: \")\n asiento.update({fila: columna})\n if asiento in asientos:\n raise Exception\n if mapa_aux[int(fila)-1][int(columna)-1]:\n raise Exception\n break\n except:\n print(f\"\\nEl asiento {fila} {columna} ya esta ocupado. 
Por favor elija otro asiento.\")\n asientos.append(asiento)\n mapa_aux[int(fila)-1][int(columna)-1]=True\n contador += 1\n print('Sus asientos seleccionados se representan con una \"X\"')\n imprimir_mapa(mapa_aux)\n return asientos\n \n'''Funcion para determinar si un numero es ondulado'''\ndef num_ondulado(number):\n ondulado = True\n count = 0\n even_index = list(str(number))[0]\n if number in range(1,10):\n return True\n else:\n odd_index = list(str(number))[1]\n if int(number) < 100:\n return True\n elif even_index == odd_index:\n return False\n else:\n for x in str(number):\n if (count+2)%2 == 0:\n if x != even_index:\n ondulado = False\n count +=1\n elif (count+2) %2 !=0:\n if x != odd_index:\n ondulado = False\n count += 1\n if ondulado == True:\n return True\n else:\n return False\n \n'''Gestion 3: Asistencia a las carreras'''\ndef confirmar_asistencia(clientes, codigos, carreras):\n cod = input('Ingrese el numero de cedula con la cual compro su ticket: ')\n while not cod.isnumeric():\n cod = input('\\nPor favor ingrese su numero de identificacion sin puntos: ')\n if cod in codigos:\n print('\\nSu ticket es valido!')\n for cliente in clientes:\n if cliente.identificacion == cod:\n for carrera in carreras:\n if carrera.nombre == cliente.carrera:\n carrera.asistencia += cliente.ticket.cantidad\n codigos.remove(cod)\n return carreras, codigos\n else:\n return None\n \ndef chequear_asistencia(carreras):\n for i, carrera in enumerate(carreras):\n print('\\t',i+1, carrera.nombre)\n circuito = input('Ingrese el numero correspondiente al circuito al cual desee chequear su asistencia: ')\n while not circuito.isnumeric() or int(circuito) not in range(1,len(carreras)+1):\n circuito = input(f\"\\nERROR - Ingreso Invalido\\nPor favor ingrese un numero entre 1 y {len(carreras)+1} correspondiente al circuito a asistir: \")\n circuito = int(circuito)-1\n print(f\"\\nAl {carreras[circuito].nombre} asistiran {carreras[circuito].asistencia} personas.\")\n \n'''Gestion 4: Restaurantes'''\n'''Verificar si un cliente es VIP'''\ndef verify_vip(clientes):\n vip = []\n for cliente in clientes:\n if cliente.ticket.tipo_entrada == 'VIP':\n vip.append(cliente.identificacion)\n ced = input('\\nPor favor ingrese su numero de identificacion para poder acceder a los productos: ')\n while not ced.isnumeric():\n ced = input('\\nERROR - Ingreso Invalido\\nPor favor ingrese su numero de identificacion para poder acceder a los productos: ')\n if ced not in vip:\n print('Debe comprar una entrada vip para poder acceder a los productos!')\n return False\n elif ced in vip:\n print('Usted es un cliente vip!')\n return cliente\n \n'''Buscar productos por nombre'''\ndef products_nombre(restaurantes, cliente):\n print(f\"Productos disponibles en {cliente.carrera}\")\n prod,go = [],False\n for r in (restaurantes):\n if r.carrera == cliente.carrera:\n for p in (r.productos):\n prod.append(p)\n go = True\n for i,p in enumerate(prod):\n print(i+1,p.nombre)\n if go:\n choice = input('\\nIngrese el numero correspondiente al producto del cual desee ver su informacion completa: ')\n while not choice.isnumeric() or int(choice) not in range(1,len(prod)+1):\n print(f\"\\nERROR - Opcion Invalida\\nRecuerde que en {cliente.carrera} solo hay {len(prod)+1} disponibles\\nIngrese el numero correspondiente al producto elegido a continuacion: \")\n choice = int(choice) -1\n for i,p in enumerate(prod):\n if i == choice:\n p.mostrar()\n else:\n print(f\"Lo sentimos, no hay productos disponibles en el {cliente.carrera}\")\n\n'''Buscar productos 
por tipo'''\ndef products_type(restaurantes,cliente):\n choice = input('\\nPresione 1 para ver productos de tipo bebida\\nPresione 2 para ver productos de tipo comida\\n>> ')\n while not choice.isnumeric() or int(choice) not in range(1,3):\n choice = input('\\nERROR - Ingreso Invalido\\nPor favor ingrese 1 para bebidas o 2 para comidas: ')\n prod,go = [],False\n for r in (restaurantes):\n if r.carrera == cliente.carrera:\n for p in (r.productos):\n prod.append(p)\n go = True\n if choice == '1':\n print('\\n\\nBEBIDAS DISPONIBLES EN',(cliente.carrera).upper())\n elif choice == '2':\n print('\\n\\nCOMIDA DISPONIBLE EN',(cliente.carrera).upper())\n\n if go:\n for p in prod:\n if choice == '1':\n if isinstance(p, Bebida):\n p.mostrar()\n print('*****************************')\n elif choice == '2':\n if isinstance(p, Comida):\n p.mostrar()\n print('*****************************')\n else:\n print(f\"\\nLo sentimos, no hay productos en el {cliente.carrera}\")\n \n'''Buscar productos por rango de precio'''\ndef productos_precio(restaurantes, cliente):\n while True:\n try:\n minimo = int(input('Ingrese el minimo precio del producto que desea obtener: '))\n break\n except:\n print('\\nERROR - Por favor ingrese un numero entero!')\n while True:\n try:\n maximo = int(input('Ingrese el maximo precio del producto que desea obtener: '))\n break\n except:\n print('\\nERROR - Por favor ingrese un numero entero!')\n prod, go = [], False\n for r in (restaurantes):\n if r.carrera == cliente.carrera:\n for p in (r.productos):\n if int(float(p.total)) in range (minimo,maximo+1):\n prod.append(p)\n go=True\n if go:\n print(f\"\\nProductos disponibles en el rango de {minimo}$ a {maximo}$\")\n for p in prod:\n p.mostrar()\n else:\n print(f\"\\nLo sentimos, no hay productos disponibles en el {cliente.carrera}\")\n \n'''Gestion 5: Venta de restaurante'''\n'''Comprar productos pertenecientes a la carrera a la que asiste el cliente'''\ndef get_compra(compras, cliente, restaurantes):\n orden = {}\n precio = 0\n while True:\n print(f\"Productos disponibles en {cliente.carrera}\")\n prod,go = [],False\n for r in (restaurantes):\n if r.carrera == cliente.carrera:\n for p in (r.productos):\n prod.append(p)\n go = True\n for i,p in enumerate(prod):\n print(i+1,p.nombre)\n if go:\n choice = input('\\nIngrese el numero correspondiente al producto que desee comprar: ')\n while not choice.isnumeric() or int(choice) not in range(1,len(prod)+1):\n choice = input(f\"\\nERROR - Opcion Invalida\\nRecuerde que en {cliente.carrera} solo hay {len(prod)} productos disponibles\\nIngrese el numero correspondiente al producto elegido a continuacion: \")\n choice = int(choice) -1\n while True:\n if not isinstance(prod[choice], Bebida):\n break\n else:\n if prod[choice].alcoholico and int(cliente.edad) < 18:\n choice = input('Recuerde que los menores de edad no pueden comprar bebidas alcoholicas!\\nPor favor seleccione otro producto: ')\n while not choice.isnumeric() or int(choice) not in range(1,len(prod)+1):\n choice = input(f\"\\nERROR - Opcion Invalida\\nRecuerde que en {cliente.carrera} solo hay {len(prod)} productos disponibles y que debe colocar un numero entero\\nIngrese el numero correspondiente al producto elegido a continuacion: \")\n choice = int(choice) -1 \n continue\n else:\n break\n cantidad = input(f\"Cuantos {prod[choice].nombre} desea? 
Recuerde que solo hay {prod[choice].inventario} unidades disponibles: \")\n            while not cantidad.isnumeric() or int(cantidad) not in range(1,(prod[choice].inventario)+1):\n                cantidad = input('\\nERROR - Ingreso Invalido\\nCuantos productos desea? Por favor ingrese un numero entero: ')\n            cantidad = int(cantidad)\n            if prod[choice].nombre not in orden.keys():\n                orden.update({prod[choice].nombre:cantidad})\n            else:\n                orden[prod[choice].nombre] += cantidad\n            precio += prod[choice].total * cantidad\n            prod[choice].inventario -= cantidad\n            if input('Presione \"X\" para comprar otro producto o cualquier tecla para finalizar su compra: ').title() != 'X':\n                break\n            else:\n                continue\n        else:\n            print(f\"Lo sentimos, no hay productos disponibles en el {cliente.carrera}\")\n            return\n    if perfectos(cliente.identificacion) == True:\n        x = Compra(cliente.identificacion, orden, precio, precio*0.15, (precio-(precio*0.15)))\n        print(f\"\\n\\n\\t----- COMPRA DE {cliente.nombre.upper()} -----\")\n        x.mostrar()\n        if input('\\nPresione cualquier tecla para confirmar o \"X\" para cancelar compra: ').title() != 'X':\n            print('\\nSe ha realizado la compra con exito!\\n')\n            compras.append(x)\n            return restaurantes, compras\n        else:\n            for p, cantidad in orden.items():\n                for pro in prod:\n                    if p == pro.nombre:\n                        # fixed: was `prod.inventario` (a list), which raised AttributeError on cancel\n                        pro.inventario += cantidad\n            print('Se ha cancelado la compra')\n    else:\n        x = Compra(cliente.identificacion, orden, precio, 0, precio)\n        print(f\"\\n\\t----- Compra de {cliente.nombre} -----\")\n        x.mostrar()\n        if input('Presione cualquier tecla para confirmar o \"X\" para cancelar compra: ').title() != 'X':\n            print('\\nSe ha realizado la compra con exito!\\n')\n            compras.append(x)\n            return restaurantes, compras\n        else:\n            for p, cantidad in orden.items():\n                for pro in prod:\n                    if p == pro.nombre:\n                        pro.inventario += cantidad\n            print('Se ha cancelado la compra')\n\n'''Determinar si un numero es perfecto'''\ndef perfectos(num):\n    num = int(num)\n    suma = 0\n    for divisor in range(1, num):\n        if (num % divisor) == 0:\n            suma += divisor\n    if suma == num:\n        return True\n    else:\n        return False\n\n'''Gestion 6: Estadisticas'''\n'''Promedio de gasto de un cliente VIP'''\ndef promedio_vip(clientes, compras):\n    total = 0\n    contador = 0\n    for cliente in clientes:\n        if cliente.ticket.tipo_entrada == 'VIP':\n            total += cliente.ticket.precio\n            contador+=1\n            if compras != []:\n                for compra in compras:\n                    if cliente.identificacion == compra.cedula:\n                        total += compra.total\n    try:\n        promedio = total/contador\n        print(f'Un cliente VIP gasta en promedio {round(promedio,2)}$')\n    except:\n        print('No hay clientes VIP registrados en el sistema')\n\n'''Tabla de asistencia a las carreras'''\nfrom tabulate import tabulate\ndef tabla_asistencia(carreras):\n    aux = sorted(carreras, key=lambda x: x.asistencia, reverse=True)\n    l = []\n    for i, carrera in enumerate(aux):\n        try:\n            relacion = carrera.asistencia / carrera.boletos_vendidos\n        except:\n            relacion = 0\n        d = [i+1, carrera.nombre, carrera.circuito.name, carrera.asistencia, carrera.boletos_vendidos, relacion]\n        l.append(d)\n    print(tabulate(l, headers=['Posicion','Nombre','Estadio','Asistencia','Boletos','Relacion asistencia/boletos']))\n\n'''Carrera con mayor asistencia'''\ndef mayor_asistencia(carreras):\n    max, max_carrera = 0, ''\n    for carrera in carreras:\n        if carrera.asistencia > max:\n            max = carrera.asistencia\n            max_carrera = carrera.nombre\n    if max == 0:\n        print('\\nNo se ha confirmado asistencia para ninguna carrera.')\n    else:\n        print(f'\\nLa carrera con mayor asistencia es {max_carrera}, para la cual asistiran {max} personas.')\n        # solo graficamos cuando hay datos de asistencia\n        aux = sorted(carreras, key=lambda x:x.asistencia, reverse=True)\n        l = []\n        ind = []\n        contador = 0\n        for prod in aux:\n            if contador < 5:\n                ind.append(prod.nombre)\n                l.append(prod.asistencia)\n                contador+=1\n            else:\n                break\n        data = pd.DataFrame(l,index=ind)\n        total = data.sum(axis=1)\n        plt.bar(total.index, total)\n        plt.show()\n\n'''Carrera con mayor boletos vendidos'''\ndef mayor_boletos(carreras):\n    max, max_carrera = 0, ''\n    for carrera in carreras:\n        if carrera.boletos_vendidos > max:\n            max = carrera.boletos_vendidos\n            max_carrera = carrera.nombre\n    if max == 0:\n        print('\\nNo se han vendido boletos para ninguna carrera.')\n    else:\n        print(f'\\nLa carrera con mayor boletos vendidos es {max_carrera}, para la cual se vendieron {max} boletos.')\n        # solo graficamos cuando hay boletos vendidos\n        aux = sorted(carreras, key=lambda x:x.boletos_vendidos, reverse=True)\n        l = []\n        ind = []\n        contador = 0\n        for prod in aux:\n            if contador < 5:\n                ind.append(prod.nombre)\n                l.append(prod.boletos_vendidos)\n                contador+=1\n            else:\n                break\n        data = pd.DataFrame(l,index=ind)\n        total = data.sum(axis=1)\n        plt.bar(total.index, total)\n        plt.show()\n\n'''Top 3 productos mas vendidos'''\ndef max_productos(restaurantes):\n    productos = []\n    for restaurante in restaurantes:\n        for p in restaurante.productos:\n            productos.append(p)\n    aux = sorted(productos, key=lambda x: x.inventario)\n    if aux[0].inventario == 300:\n        print('No se ha vendido ningun producto')\n    elif aux[1].inventario == 300:\n        print(f\"1. {aux[0].nombre}\")\n    elif aux[2].inventario == 300:\n        print(f\"1. {aux[0].nombre}\\n2. {aux[1].nombre}\")\n    else:\n        print(f\"1. {aux[0].nombre}\\n2. {aux[1].nombre}\\n3. {aux[2].nombre}\")\n    l = []\n    ind = []\n    contador = 0\n    for prod in aux:\n        if contador < 5:\n            ind.append(prod.nombre)\n            l.append(300 - prod.inventario)\n            contador+=1\n        else:\n            break\n\n    if aux[0].inventario != 300:\n        data = pd.DataFrame(l,index=ind)\n        total = data.sum(axis=1)\n        plt.bar(total.index, total)\n        plt.show()\n\n'''Top 3 clientes'''\ndef max_clientes(clientes):\n    aux = sorted(clientes, key=lambda x: x.ticket.cantidad,reverse=True)\n    if len(aux) == 0:\n        print('Aun no se han vendido boletos')\n    elif len(aux) == 1:\n        print(f\"1. {aux[0].nombre.title()}\")\n    elif len(aux) == 2:\n        print(f\"1. {aux[0].nombre.title()}\\n2. {aux[1].nombre.title()}\")\n    else:\n        print(f\"1. {aux[0].nombre.title()}\\n2. {aux[1].nombre.title()}\\n3. {aux[2].nombre.title()}\")\n    l = []\n    ind = []\n    for client in aux:\n        ind.append(client.nombre.title())\n        l.append(client.ticket.cantidad)\n    if len(aux) != 0:\n        data = pd.DataFrame(l,index=ind)\n        total = data.sum(axis=1)\n        plt.bar(total.index, total)\n        plt.show()\n    ","repo_name":"anabellaj/ProyectoFormula1","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":31473,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"25661923808","text":"import pymysql\r\nimport requests\r\nimport xml.etree.ElementTree as ET\r\nimport json\r\nimport smtplib\r\nimport random\r\nimport difflib\r\nimport sched, time\r\n\r\ns = sched.scheduler(time.time, time.sleep)\r\n\r\n\r\ndef get_query():\r\n\r\n with open('config.json', 'r') as f:\r\n config = json.load(f)\r\n\r\n conn = pymysql.connect(host=config['dbHost'],\r\n user=config['dbUser'],\r\n password=config['dbPassword'],\r\n db=config['dbName'])\r\n try:\r\n # Grabs the query that has been in db the longest and does not already have a result\r\n with conn.cursor() as cursor:\r\n sql = 'SELECT query, userID, queryID FROM querys WHERE result IS NULL ORDER BY queryID LIMIT 1'\r\n cursor.execute(sql)\r\n data = cursor.fetchone()\r\n if data:\r\n query = data[0]\r\n user_id = data[1]\r\n query_id = data[2]\r\n request_data = {'query': query, 'user_id': user_id, 'query_id': query_id}\r\n else:\r\n query = None\r\n cursor.close()\r\n finally:\r\n conn.close()\r\n if query:\r\n return request_data\r\n else:\r\n return None\r\n\r\n\r\ndef get_session_key():\r\n\r\n # XML string used to interact with i2b2 API, will use an xml string like this for every i2b2 call\r\n ### Currently using demo username and password, will need to come back to this when we get into authentication\r\n xml = \"\"\"\r\n \r\n \r\n \r\n http://brsadata01pv:9090/i2b2/services/PMService/getServices\r\n \r\n\r\n 1.1\r\n 2.4\r\n \r\n i2b2 Project Management\r\n 1.6\r\n \r\n \r\n i2b2 Hive\r\n \r\n \r\n Project Management Cell\r\n 1.6\r\n \r\n \r\n i2b2 Hive\r\n \r\n 2018-07-03T15:38:55-04:00\r\n \r\n i2b2demo\r\n demo\r\n demouser\r\n \r\n \r\n jFymL84vZ75999jaS9IYH\r\n 0\r\n \r\n \r\n P\r\n I\r\n \r\n AL\r\n AL\r\n US\r\n undefined\r\n \r\n \r\n 180000\r\n \r\n \r\n \r\n undefined\r\n \r\n \r\n \"\"\"\r\n\r\n # Stores xml response in xml file, similar files for each function which calls I2B2\r\n resp = requests.post('http://brsadata01pv:909/i2b2-webclient/index.php', data=xml)\r\n with open('login.xml', 'wb') as f:\r\n f.write(resp.content)\r\n\r\n # Have to pass namespaces as argument to dom as it does not recognize them by default\r\n namespaces = {'ns2': 'http://www.i2b2.org/xsd/hive/msg/1.1/', 'ns4': 'http://www.i2b2.org/xsd/cell/pm/1.1/',\r\n 'ns3': 'http://www.i2b2.org/xsd/hive/msg/version/'}\r\n dom = ET.parse('login.xml')\r\n data = dom.findall('message_body/ns4:configure/user', namespaces)\r\n session_key = None # Declared here for scope reasons\r\n for d in data:\r\n session_key = d.find('password').text\r\n return session_key\r\n\r\n\r\ndef get_categories(session_key):\r\n\r\n xml = \"\"\"\r\n \r\n \r\n \r\n http://brsadata01pv:9090/i2b2/services/OntologyService/getCategories\r\n \r\n \r\n 1.1\r\n 2.4\r\n \r\n i2b2 Ontology\r\n 1.6\r\n \r\n \r\n i2b2 Hive\r\n \r\n \r\n Ontology Cell\r\n 1.6\r\n \r\n \r\n i2b2 Hive\r\n \r\n 2018-07-12T16:10:29-04:00\r\n \r\n i2b2demo\r\n demo\r\n \"\"\" + session_key + \"\"\"\r\n \r\n \r\n 6O67XVpM9Kvk0n330Lequ\r\n 0\r\n \r\n \r\n P\r\n I\r\n \r\n AL\r\n AL\r\n US\r\n Demo\r\n \r\n \r\n 180000\r\n \r\n \r\n \r\n \r\n \"\"\"\r\n\r\n resp = requests.post('http://brsadata01pv:909/i2b2-webclient/index.php', data=xml)\r\n with open('categories.xml', 'wb') as f:\r\n f.write(resp.content)\r\n\r\n namespaces = {'ns5': \"http://www.i2b2.org/xsd/hive/msg/1.1/\", 'ns6': \"http://www.i2b2.org/xsd/cell/ont/1.1/\"}\r\n dom = ET.parse('categories.xml')\r\n data = dom.findall('message_body/ns6:concepts/concept', namespaces)\r\n\r\n key_strings = [] # Declared here for 
scope reasons\r\n for d in data:\r\n item = d.find('key').text\r\n key_strings.append(item)\r\n\r\n # Formats and stores categories in list to return\r\n categories = []\r\n for cat in key_strings:\r\n item = cat.split('\\\\')\r\n categories.append(item[2])\r\n return categories\r\n\r\n\r\ndef get_search_keys(session_key, query, categories):\r\n\r\n search_keys = [] # Declared here for scope reasons\r\n # We must search through each category as there is no 'all category' option\r\n for cat in categories:\r\n xml = \"\"\"\r\n \r\n \r\n \r\n http://brsadata01pv:9090/i2b2/services/OntologyService/getNameInfo\r\n \r\n \r\n 1.1\r\n 2.4\r\n \r\n i2b2 Ontology\r\n 1.6\r\n \r\n \r\n i2b2 Hive\r\n \r\n \r\n Ontology Cell\r\n 1.6\r\n \r\n \r\n i2b2 Hive\r\n \r\n 2018-07-12T09:01:10-04:00\r\n \r\n i2b2demo\r\n demo\r\n \"\"\" + session_key + \"\"\"\r\n \r\n \r\n rHvQx1vX2Fb8r9grjy8MZ\r\n 0\r\n \r\n \r\n P\r\n I\r\n \r\n AL\r\n AL\r\n US\r\n Demo\r\n \r\n \r\n 180000\r\n \r\n \r\n \r\n \"\"\" + query + \"\"\"\r\n \r\n \r\n \"\"\"\r\n\r\n resp = requests.post('http://brsadata01pv:909/i2b2-webclient/index.php', data=xml)\r\n # Will store each category's search keys temporarily but after function is executed, file will only have the last category\r\n with open('search_keys.xml', 'wb') as f:\r\n f.write(resp.content)\r\n\r\n namespaces = {'ns5': 'http://www.i2b2.org/xsd/hive/msg/1.1/', 'ns6': 'http://www.i2b2.org/xsd/cell/ont/1.1/'}\r\n dom = ET.parse('search_keys.xml')\r\n data = dom.findall('message_body/ns6:concepts/concept', namespaces)\r\n ### For now assign a random totalnum value, will need to pull real value once connected to actual I2B2\r\n for d in data:\r\n key = d.find('key').text\r\n total_num = d.find('totalnum').text\r\n if total_num is None:\r\n total_num = random.randint(0, 11)\r\n set_pair = [key, int(total_num)]\r\n search_keys.append(set_pair)\r\n # We return a list of lists each storing a key and a totalnum value\r\n return search_keys\r\n\r\n\r\ndef find_best(search_keys, query):\r\n\r\n max_num = 0\r\n best_key = ''\r\n # We choose the best key based on totalnum and use pythons levenshtein algorithm as a tiebreak\r\n for item in search_keys:\r\n if item[1] > max_num:\r\n max_num = item[1]\r\n best_key = item[0]\r\n elif item[1] == max_num:\r\n sequence = difflib.SequenceMatcher(isjunk=None, a=item[0], b=query)\r\n similarity_competitor = round(sequence.ratio() * 100, 1)\r\n sequence = difflib.SequenceMatcher(isjunk=None, a=best_key, b=query)\r\n similarity_leader = round(sequence.ratio() * 100, 1)\r\n if similarity_competitor > similarity_leader:\r\n best_key = item[0]\r\n return best_key\r\n\r\n\r\ndef get_i2b2_result(session_key, best_key):\r\n\r\n xml = \"\"\"\r\n \r\n \r\n \r\n http://brsadata01pv:9090/i2b2/services/QueryToolService/request\r\n \r\n \r\n \r\n i2b2_QueryTool\r\n 1.6\r\n \r\n \r\n PHS\r\n \r\n \r\n i2b2_DataRepositoryCell\r\n 1.6\r\n \r\n \r\n PHS\r\n \r\n \r\n i2b2demo\r\n demo\r\n \"\"\" + session_key + \"\"\"\r\n \r\n \r\n Q04\r\n EQQ\r\n \r\n \r\n g5PzP1I7EaS6fwRJ7H7PN\r\n 0\r\n \r\n \r\n P\r\n I\r\n \r\n messageId\r\n Demo\r\n \r\n \r\n 180000\r\n \r\n \r\n \r\n demo\r\n 0\r\n 0\r\n optimize_without_temp_table\r\n CRC_QRY_runQueryInstance_fromQueryDefinition\r\n \r\n \r\n \r\n N/A\r\n ANY\r\n 0\r\n \r\n 1\r\n 100\r\n 0\r\n ANY\r\n 1\r\n - \r\n 6\r\n N/A\r\n \"\"\" + best_key + \"\"\"\r\n \r\n N/A\r\n ENC\r\n LA\r\n false\r\n
\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \"\"\"\r\n\r\n resp = requests.post('http://brsadata01pv:909/i2b2-webclient/index.php', data=xml)\r\n with open('results.xml', 'wb') as f:\r\n f.write(resp.content)\r\n\r\n namespaces = {'ns5': 'http://www.i2b2.org/xsd/hive/msg/1.1/', 'ns4': \"http://www.i2b2.org/xsd/cell/crc/psm/1.1/\"}\r\n dom = ET.parse('results.xml')\r\n data = dom.findall('message_body/ns4:response/query_result_instance', namespaces)\r\n result = None\r\n for d in data:\r\n result = d.find('set_size').text\r\n return result\r\n\r\n\r\ndef get_user_email (user_id):\r\n\r\n with open('config.json', 'r') as f:\r\n config = json.load(f)\r\n\r\n conn = pymysql.connect(host=config['dbHost'],\r\n user=config['dbUser'],\r\n password=config['dbPassword'],\r\n db=config['dbName'])\r\n try:\r\n with conn.cursor() as cursor:\r\n sql = \"SELECT email FROM userInfo where userInfo.userID = \" + str(user_id) + \"\"\r\n cursor.execute(sql)\r\n data = cursor.fetchone();\r\n user_email = data[0]\r\n cursor.close()\r\n finally:\r\n conn.close()\r\n return user_email\r\n\r\n\r\ndef send_email(user_email, msg):\r\n\r\n try:\r\n with open('config.json', 'r') as f:\r\n config = json.load(f)\r\n server = smtplib.SMTP('smtp.gmail.com:587')\r\n server.ehlo()\r\n server.starttls()\r\n server.ehlo()\r\n server.login(config['emailAddress'], config['password'])\r\n ### Eventually want to have users email address passed to use here from Alexa skill\r\n to_address = user_email\r\n # Must start message string with newline otherwise email body will be empty\r\n server.sendmail(config['emailAddress'], to_address, msg)\r\n server.quit()\r\n print('Email sent successfully!')\r\n except:\r\n print('Email failed to send!')\r\n\r\n\r\ndef update_db(result, query_id):\r\n\r\n with open('config.json', 'r') as f:\r\n config = json.load(f)\r\n\r\n conn = pymysql.connect(host=config['dbHost'],\r\n user=config['dbUser'],\r\n password=config['dbPassword'],\r\n db=config['dbName'])\r\n try:\r\n with conn.cursor() as cursor:\r\n sql = \"UPDATE querys SET result = \" + result + \" WHERE queryID = \" + str(query_id)\r\n cursor.execute(sql)\r\n cursor.close()\r\n conn.commit()\r\n finally:\r\n conn.close()\r\n print('\\nDB updated')\r\n\r\n\r\ndef main(sc):\r\n request_data = get_query()\r\n # If no requests are currently available, end program\r\n if request_data is None:\r\n print('\\nNo current requests to process')\r\n return\r\n # Unpack request_data dictionary\r\n query = request_data['query']\r\n user_id = request_data['user_id']\r\n query_id = request_data['query_id']\r\n # Currently setting default query as application is not live yet\r\n print('\\nUser query: ' + query + '\\n')\r\n # Gets session key necessary for each request we make to I2B2\r\n session_key = get_session_key()\r\n print(session_key + '\\n')\r\n # Need categories of data to search through each one for our requested data, no 'all' category option\r\n categories = get_categories(session_key)\r\n print('Categories = ' + str(categories) + '\\n')\r\n # Returns necessary key information as well as totalnum as a list of lists\r\n search_keys = get_search_keys(session_key, query, categories)\r\n # Check whether or not requested data exists\r\n if search_keys:\r\n print('All search keys returned: ' + str(search_keys) + '\\n')\r\n # Finds the 'best key' based on highest sample size and similarity to user query\r\n best_key = find_best(search_keys, query)\r\n print('Best key: ' + str(best_key) + '\\n')\r\n # Finally gets the desired data, which as of now 
is the set size\r\n result = get_i2b2_result(session_key, best_key)\r\n print('Set size: ' + result + '\\n')\r\n # Pulls the users email from db\r\n user_email = get_user_email(user_id)\r\n print(user_email + '\\n')\r\n # Send the user an email with their result\r\n message = '\\nHello, this is Alexa with I2B2. We have located the data you requested:\\nYou wanted us to query i2b2 for the phrase: ' \\\r\n + query + '.\\nGiven this information we ran a query on the following: ' + best_key + '\\nWe found this data set to have set size of: ' + result + '.'\r\n send_email(user_email, message)\r\n # Commits the changes to the db, updating the result column\r\n update_db(result, query_id)\r\n # If requested data doesn't exist...\r\n else:\r\n print(\"Could not find requested data\\n\")\r\n # Send email saying there was no data matching the user query in I2B2\r\n message = '\\nHello, this is Alexa with I2B2.\\nThe data you requested does not exist in the I2B2 database. ' \\\r\n + 'Please submit another request to Alexa making sure to use a valid query'\r\n user_email = get_user_email(user_id)\r\n send_email(user_email, message)\r\n # Put -1 as result in db to show error in users query string\r\n update_db('-1', query_id)\r\n\r\n s.enter(5, 1, main, (sc,))\r\n\r\n\r\n# Schedule main to run every 5 seconds\r\nwhile True:\r\n s.enter(5, 1, main, (s,))\r\n s.run()\r\n","repo_name":"willkc15/EchoI2B2","sub_path":"InternalFunctions/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":22452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"71398123093","text":"\nfrom System import IntPtr, Type\nfrom System.Runtime.InteropServices import Marshal\n\nfrom Ironclad import CPyMarshal\nfrom Ironclad.Structs import PyIntObject, PyObject, PyTypeObject\n\ndef OffsetPtr(ptr, offset):\n if type(offset) == IntPtr:\n offset = offset.ToInt32()\n return IntPtr(ptr.ToInt32() + offset)\n\n# note: PyBuffer_Type, PyCObject_Type, PyCode_Type, PyFrame_Type PyTraceBack_Type and PyCFunction_Type\n# are not included, because they are implemented in pure C.\n# This means that, should an extension end up actually using (say) a buffer type\n# and passing it up to IronPython, it will be treated like any other type \n# defined in a C extension.\n# PyFile_Type is a special case: it *should* be filled in by C code but, in a test\n# context, is usually not. So, we zero it and fill in the one critical method.\n_types = (\n \"PyType_Type\",\n \"PyBaseObject_Type\",\n \"PyCell_Type\",\n \"PyClass_Type\",\n \"PyInstance_Type\",\n \"PyMethod_Type\",\n \"PyComplex_Type\",\n \"PyWrapperDescr_Type\",\n \"PyProperty_Type\",\n \"PyDict_Type\",\n \"PyEnum_Type\",\n \"PyReversed_Type\",\n \"PyFile_Type\",\n \"PyFloat_Type\",\n \"PyFunction_Type\",\n \"PyClassMethod_Type\",\n \"PyStaticMethod_Type\",\n \"PyGen_Type\",\n \"PyInt_Type\",\n \"PyBool_Type\", # needs to come after PyInt_Type, if it's to have tp_base filled in correctly\n \"PySeqIter_Type\",\n \"PyCallIter_Type\",\n \"PyList_Type\",\n \"PyLong_Type\",\n \"PyCFunction_Type\",\n \"PyModule_Type\",\n \"PySuper_Type\",\n \"PyRange_Type\",\n \"PySet_Type\",\n \"PyFrozenSet_Type\",\n \"PySlice_Type\",\n \"PyBaseString_Type\",\n \"PySTEntry_Type\",\n \"PyString_Type\",\n \"PySymtableEntry_Type\",\n \"PyTuple_Type\",\n \"PyUnicode_Type\",\n \"PyNone_Type\", # not exported, for some reason\n \"PyEllipsis_Type\", # not exported, for some reason\n \"PyNotImplemented_Type\", # not exported, for some reason\n \"_PyWeakref_RefType\",\n \"_PyWeakref_ProxyType\",\n \"_PyWeakref_CallableProxyType\"\n)\n\nsizeOfType = Marshal.SizeOf.Overloads[Type]\nPtrToStructure = Marshal.PtrToStructure.Overloads[IntPtr, Type]\n\n_others = {\n \"_Py_NoneStruct\": sizeOfType(PyObject),\n \"_Py_NotImplementedStruct\": sizeOfType(PyObject),\n \"_Py_EllipsisObject\": sizeOfType(PyObject),\n \"_Py_ZeroStruct\": sizeOfType(PyIntObject),\n \"_Py_TrueStruct\": sizeOfType(PyIntObject),\n \"_PyThreadState_Current\": sizeOfType(IntPtr),\n}\ndef CreateTypes(mapper, readyTypes=True):\n blocks = []\n\n def create(name, size):\n block = Marshal.AllocHGlobal(size)\n if name == 'PyFile_Type':\n CPyMarshal.Zero(block, size);\n CPyMarshal.WritePtrField(block, PyTypeObject, 'tp_dealloc', mapper.GetFuncPtr('IC_file_dealloc'))\n mapper.RegisterData(name, block)\n blocks.append(block)\n \n for _type in _types:\n create(_type, Marshal.SizeOf(PyTypeObject()))\n for (_other, size) in _others.items():\n create(_other, size)\n \n if readyTypes:\n mapper.ReadyBuiltinTypes()\n \n def DestroyTypes():\n for block in blocks:\n Marshal.FreeHGlobal(block)\n \n return DestroyTypes\n\n\n","repo_name":"IronLanguages/ironclad","sub_path":"tests/utils/memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"67"}
+{"seq_id":"39543461604","text":"import numpy\nimport json\nimport pybullet\n\nclass TrackLoad:\n def __init__(self, pb_client, file_name_prefix):\n \n self.pi = 3.141592654\n self.points = self._load_json(file_name_prefix + \".json\")\n self.urfd_model = pb_client.loadURDF(file_name_prefix + \".urdf\")\n\n def _load_json(self, file_name):\n\n with open(file_name) as f:\n json_data = json.load(f)\n \n points_count = json_data[\"points_count\"]\n points = numpy.zeros((points_count, 2))\n for i in range(points_count):\n points[i][0] = json_data[\"points\"][i][0]\n points[i][1] = json_data[\"points\"][i][1]\n\n return points\n\n \n def get_length(self):\n return len(self.points)\n\n def get_start(self, idx = 20):\n dx = self.points[idx + 1][0] - self.points[idx][0]\n dy = self.points[idx + 1][1] - self.points[idx][1]\n\n yaw = numpy.arctan2(dy, dx)\n\n point = [self.points[idx][0], self.points[idx][1], 0.05]\n orientation = [yaw, 0.0, 0.0]\n return [point, orientation]\n\n def get_start_random(self):\n idx = numpy.random.randint((self.get_length()*80)//100) + 20\n return self.get_start(idx)\n\n def get_closest(self, x, y):\n position = [x, y]\n\n dif = self.points - position\n distances = (numpy.sum((dif**2), axis = 1))**0.5\n\n closest_idx = numpy.argmin(distances)\n closest_distance = distances[closest_idx]\n\n return closest_idx, closest_distance\n\n","repo_name":"michalnand/line_follower_rl","sub_path":"ai_gym_train/gym_linefollower/track_load.py","file_name":"track_load.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"}
+{"seq_id":"12722122593","text":"import matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nimport numpy as np\nfrom typing import List\nimport sys\nfrom pathlib import Path\n\nplt.rcParams[\"font.family\"] = \"Times New Roman\"\nsys.path.append(str(Path(__file__).parent.parent))\n\nplt.rcParams[\"font.size\"] = 11\nplt.rcParams[\"pdf.fonttype\"] = 42\n\nfrom plotting_helper import task_pos_in_order\nfrom blackbox_helper import get_configs\nfrom collect_yahpo_evaluations_for_plotting import hp_names as hp_names_raw\n\n\ndef plot_hp_perf(\n scenario: str,\n instance: str,\n metric: str,\n train_fracs: List[float],\n x_par,\n y_par,\n log_x=True,\n log_y=True,\n marker_colour=\"black\",\n task_values=None,\n save_fig=True,\n alpha_back=1,\n alpha_crosses=1,\n fig_axes=None,\n only_crosses=False,\n edgewidth=1.75,\n cross_size=100,\n):\n\n hp_names = [\n hp\n for hp in hp_names_raw[scenario]\n if hp not in [\"trainsize\", \"task_id\", \"repl\"]\n ]\n if fig_axes is None:\n fig, axes = plt.subplots(1, len(train_fracs), figsize=(8, 2 * 8 / 11))\n else:\n fig, axes = fig_axes\n\n csv_path = \"yahpo_data/%s/%s.csv\" % (scenario, instance)\n if not os.path.exists(csv_path):\n raise ValueError\n\n df = pd.read_csv(csv_path)\n\n for j, frac in enumerate(train_fracs):\n tdf_i = df[df[\"train_frac\"] == frac]\n\n x = []\n for _, row in tdf_i.iterrows():\n hp = eval(row[\"hp_key\"])\n hp.update({metric: row[metric]})\n x.append(hp)\n pdf = pd.DataFrame(x, columns=hp_names + [metric])\n perf_df = pdf.groupby([x_par, y_par])[metric].mean().reset_index()\n if not only_crosses:\n sc = axes[j].scatter(\n perf_df[x_par],\n perf_df[y_par],\n c=perf_df[metric],\n s=5,\n vmin=np.min(df[metric]),\n vmax=np.max(df[metric]),\n alpha=alpha_back,\n zorder=0,\n )\n\n inds = perf_df[metric].reset_index().nlargest(10, columns=metric).index\n for k, ind in enumerate(inds):\n axes[j].scatter(\n perf_df[x_par].loc[ind],\n perf_df[y_par].loc[ind],\n marker=\"X\",\n color=marker_colour,\n s=cross_size,\n edgecolors=\"white\",\n linewidths=edgewidth,\n alpha=alpha_crosses,\n zorder=20,\n )\n\n axes[j].set_xlabel(x_par)\n if log_x:\n axes[j].set_xscale(\"log\")\n if log_y:\n axes[j].set_yscale(\"log\")\n\n task_num = task_pos_in_order(int(frac * 20), task_values)\n axes[j].set_title(\"Task %s (%s %%)\" % (task_num, int(frac * 100)))\n axes[0].set_ylabel(y_par)\n\n for ii in range(len(train_fracs)):\n if scenario == \"rbv2_svm\":\n axes[ii].set_xticks([0.01, 100])\n elif scenario == \"rbv2_aknn\":\n axes[ii].set_xticks([20, 40])\n elif scenario == \"rbv2_ranger\":\n axes[ii].set_xticks([500, 1500])\n elif scenario == \"rbv2_glmnet\":\n axes[ii].set_xticks([0.001, 0.1])\n\n if not only_crosses:\n fig.subplots_adjust(right=0.90)\n cbar_ax = fig.add_axes([0.92, 0.1, 0.01, 0.8])\n cbar = fig.colorbar(sc, cax=cbar_ax)\n cbar.set_label(metric)\n\n for ii in range(1, len(train_fracs)):\n axes[ii].set_yticklabels([])\n\n if save_fig:\n plot_file = f\"plotting/Figures/yahpo_landscapes/yahpo_landscape_{scenario}_{instance}_{x_par}_{y_par}_2d.pdf\"\n plt.savefig(plot_file, dpi=400, bbox_inches=\"tight\", pad_inches=0)\n print(\"Generated file %s\" % plot_file)\n return fig, axes\n\n\npars_to_plot = {\n \"rbv2_svm\": (\"cost\", \"tolerance\"),\n \"rbv2_aknn\": (\"k\", \"M\"),\n \"rbv2_ranger\": (\"num.trees\", \"min.node.size\"),\n \"rbv2_glmnet\": (\"alpha\", \"s\"),\n}\ntrain_fracs = [0.05, 0.25, 0.5, 0.75, 1.0]\n\n\nif __name__ == \"__main__\":\n os.makedirs(\"plotting/Figures/yahpo_landscapes\", exist_ok=True)\n\n instances = 
{\n \"rbv2_svm\": [1220, 458],\n \"rbv2_aknn\": [4538, 41138],\n \"rbv2_ranger\": [4154, 40978],\n \"rbv2_glmnet\": [375, 40981],\n }\n\n for scenario in [\"rbv2_svm\", \"rbv2_aknn\", \"rbv2_ranger\", \"rbv2_glmnet\"]:\n for instance in instances[scenario]:\n\n log_axis = {\n \"rbv2_svm\": (True, True),\n \"rbv2_aknn\": (False, False),\n \"rbv2_ranger\": (False, False),\n \"rbv2_glmnet\": (True, True),\n }[scenario]\n\n x_par, y_par = pars_to_plot[scenario]\n task_values = get_configs(\n \"YAHPO\", yahpo_dataset=instance, yahpo_scenario=scenario\n )[0]\n\n _ = plot_hp_perf(\n scenario,\n instance,\n metric=\"auc\",\n train_fracs=train_fracs,\n x_par=x_par,\n y_par=y_par,\n log_x=log_axis[0],\n log_y=log_axis[1],\n marker_colour=\"black\",\n task_values=task_values,\n )\n","repo_name":"awslabs/syne-tune","sub_path":"benchmarking/nursery/othpo/plotting/plot_yahpo_landscapes.py","file_name":"plot_yahpo_landscapes.py","file_ext":"py","file_size_in_byte":5092,"program_lang":"python","lang":"en","doc_type":"code","stars":332,"dataset":"github-code","pt":"67"}
+{"seq_id":"14079531704","text":"import torch\nimport pyro\nimport pyro.distributions as dist\n\ndef init_vector(name, dims=None):\n return pyro.sample(name, dist.Normal(torch.zeros(dims), 0.2 * torch.ones(dims)).to_event(1))\n\n\n\ndef validate_data_def(data):\n assert 'G' in data, 'variable not found in data: key=G'\n assert 'N' in data, 'variable not found in data: key=N'\n assert 'r' in data, 'variable not found in data: key=r'\n assert 'n' in data, 'variable not found in data: key=n'\n # initialize data\n G = data[\"G\"]\n N = data[\"N\"]\n r = data[\"r\"]\n n = data[\"n\"]\n\ndef init_params(data):\n params = {}\n return params\n\ndef model(data, params):\n # XXX: this model currenty NaNs\n # initialize data\n G = data[\"G\"]\n N = data[\"N\"]\n r = data[\"r\"]\n n = data[\"n\"]\n\n # model block\n with pyro.plate('a_', G, dim=-2):\n mu = pyro.sample('mu', dist.Uniform(0., 1.))\n a_plus_b = pyro.sample('a_plus_b', dist.Pareto(0.1, 1.5))\n a = mu * a_plus_b\n b = (1 - mu) * a_plus_b\n with pyro.plate('data', N, dim=-1):\n p = pyro.sample('p', dist.Beta(a, b))\n r = pyro.sample('r', dist.Binomial(n, p), obs=r)\n\n","repo_name":"pyro-ppl/pyro-models","sub_path":"pyro_models/bugs/litter.py","file_name":"litter.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"67"}
+{"seq_id":"9842388819","text":"#Stats.py Compute the Mean and Median out of a Odd number of numbers in ordered list\n\ndef main():\n \n #Starts Stat Program Instruction \n print('\\nWelcome to the Stat Program\\nBEWARE, ENTER ONLY SEQUENCES AMOUNTING LEN[...] == ODD ')\n print('PLEASE ONLY ENTER LIST OF NUMBERS SEPARTED BY , AND MUST BE IN ORDER \\n')\n #Print and store a list of numbers to be computed\n n = ((input('Please enter your list of numbers to be computed? : ')).split(','))\n\n #Grab the length of the list\n nLength = len(n)\n\n #Setup storage for sum and median \n sum = 0\n median = 0\n\n #loop though first half of sequence adding to sum \n for i in range(int(nLength/2)):\n sum = sum + int(n[i])\n print('Counting your wishs @: '+ str(sum))\n\n #Next number is the median\n print('\\nComputing median...and drawing breath') \n median = n[int(nLength/2)]\n\n #Add median to the sum \n sum = sum + int(median)\n print('the half way is here...wherever that is ..\\n ')\n\n #Loop though the second half of sequence adding to sum \n for i in range (int(nLength/2)):\n sum = sum + int(n[(int(nLength/2)+1)+i])\n print('Counting your disappointments @: '+ str(sum))\n \n #Store the mean\n mean = float(sum/nLength)\n\n #Print the result to the user\n print('\\nGiven a moment to consider your question....\\nI have come to the conclusion\\n')\n print('The stats for this sequence of numbers is :\\nMedian: '+ str(median)+ '\\nMean: '+ str(mean))\n\nmain()\n\n\n\n\n\n","repo_name":"trew13690/CSC101Code","sub_path":"Work/Week2/stat.py","file_name":"stat.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"12384239899","text":"from pynput.keyboard import Key, Controller\nkeyboard = Controller()\n\n\n# key for each button ID in universal remote mode\nmedia_commands = {\n 'play_pause': Key.media_play_pause,\n 'volume_down': Key.media_volume_down,\n 'volume_up': Key.media_volume_up,\n 'playlist_previous': Key.media_previous,\n 'playlist_next': Key.media_next\n}\n\n# key/key-combinations for each button ID in youtube remote mode\nyoutube_commands = {\n 'shortcuts': {\n 'play_pause': 'k',\n 'yt_forward': 'l',\n 'yt_rewind': 'j',\n 'yt_captions': 'c',\n 'yt_fullscreen': 'f'\n },\n 'shift': {\n 'playlist_previous': 'p',\n 'playlist_next': 'n',\n },\n 'media': {\n 'volume_down': Key.media_volume_down,\n 'volume_up': Key.media_volume_up,\n }\n}\n\ndef control(mode, key):\n\n \"\"\"\n args:\n mode: 'YOUTUBE' or 'NORMAL'\n key: button_id which maps to its corresponding pynput Key\n \"\"\"\n\n if mode == 'YOUTUBE':\n\n if key in youtube_commands['shortcuts'].keys():\n keyboard.type(youtube_commands['shortcuts'][key])\n\n elif key in youtube_commands['shift'].keys():\n with keyboard.pressed(Key.shift):\n keyboard.type(youtube_commands['shift'][key])\n\n elif key in youtube_commands['media'].keys():\n keyboard.press(youtube_commands['media'][key])\n keyboard.release(youtube_commands['media'][key])\n \n\n elif mode == 'NORMAL':\n if key in media_commands.keys():\n keyboard.press(media_commands[key])\n keyboard.release(media_commands[key])","repo_name":"shreydan/media-control","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"67"}
+{"seq_id":"24244890922","text":"# This is the number of pixels, along which the finger can move while the user tries to hold the finger steady below the required word.\nPERMISSIBLE_FINGER_MOVEMENT = 4\n# This is the number of times for which the detected finger co-ordinate must be within the PERMISSIBLE_FINGER_MOVEMENT\nMIN_STEADY_FINGER_COUNT = 2\n\n# These are the number of pixels, from the centre of the detected finger co-ordinate, for cropping the image.\nCROP_DISTANCE_LEFT = 100\nCROP_DISTANCE_RIGHT = 100\n# The CROP_DISTANCE_BOTTOM is the pixels from the detected finger co-ordinate to the actual word's bottom co-ordinate.\n# This is to take into account the sharpness of the user's finger tip.\nCROP_DISTANCE_BOTTOM = 15\nCROP_DISTANCE_TOP = 100\n\n# Factor to Zoom the Cropped Image.\nZOOM_PERCENTAGE = 2\n# Centre of the Cropped Image\nCROPPED_IMAGE_CENTRE = 200\n\n# Confidence Levels\nFINGER_DETECTION_CONFIDENCE = 0.7\nFINGER_TRACKING_CONFIDENCE = 0.5\nOCR_CONFIDENCE = 0.6\n\n# Rescaling Factor:\nRESCALE_FACTOR = 0.2\n\n# Rotate Flag:\n# 1 for Rotate, -1 for No Rotate\nROTATE = 1\n","repo_name":"KhushalPShah/Book-Companion","sub_path":"Firmware/Python/Distant-Camera/Finger-Detection/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"6407880284","text":"import requests\nimport pandas as pd\nimport re\nfrom bs4 import BeautifulSoup\nimport csv\n\ndef read_data():\n data = pd.read_excel('smaorter-2015_ver2.xlsx', skiprows=list(range(9)), usecols=list(range(4, 10)))\n data2 = pd.read_excel('tatorter-2015.xlsx', skiprows=list(range(10)), usecols=list(range(4, 20)))\n\n data.drop_duplicates(subset='Distriktsnamn', inplace=True)\n\n smaort = list(data.Distriktsnamn.unique())\n tatort = list(data2.Tätortsbeteckning.unique())\n #print(smaort)\n #print(tatort)\n\n sma_by = list(filter(lambda x: 'by' in x, smaort))\n sma_stad = list(filter(lambda x: 'stad' in x, smaort))\n\n print('Antal byar i grupp småort: {}'.format(len(sma_by)))\n print('Antal städer i grupp småort: {}'.format(len(sma_stad)))\n \n tat_by = list(filter(lambda x: 'by' in x, tatort))\n tat_stad = list(filter(lambda x: 'stad' in x, tatort))\n\n print('Antal byar i grupp tätort: {}'.format(len(tat_by)))\n print('Antal städer i grupp tätort: {}'.format(len(tat_stad)))\n\n return data, data2, smaort, tatort\n\n_, _, smaort, tatort = read_data()\ntotal = smaort+tatort\n\nsammansattning = {name: [] for name in total}\n\ndef SALDO_sammansattning(ortsbeteckning):\n for ort in ortsbeteckning:\n r = requests.get('http://spraakbanken.gu.se/ws/saldo-ws/sms/json/{}'.format(ort))\n try:\n jsondecode = r.json()\n #print(jsondecode)\n if jsondecode != []:\n n = r.json()\n for item in n[0]:\n print(item)\n if item in sammansattning[ort]:\n continue\n #print(item['segment'])\n else:\n sammansattning[ort].append(item['segment'])\n if jsondecode == []:\n #else:\n sammansattning[ort].append(ort)\n #print(ort)\n #pass\n except:\n pass\n\nSALDO_sammansattning(total)\nprint(sammansattning)\n\n\ndef scrapewiki(url):\n wiki_url = requests.get(url).text\n soup = BeautifulSoup(wiki_url,'lxml')\n tag = soup.find('div', {'class' : 'toc'})\n # find all span tags which contain the place name\n spans = tag.findAll('span')\n efterled = [span.contents[0] for span in spans]\n # the following line will still leave some redundant \"floatlike\" strings:\n efterled = [i for i in efterled[9:-11] if not i.isdigit()]\n efterled = [re.sub(r'[0-9|-]', '', s) for s in efterled]\n efterled = [re.sub(r'/', ' ', s) for s in efterled]\n empty = ['hult', 'borg', 'sta', 'vik', 'strand', 'berg', 'norra', 'södra', 'västra', 'östra', 'bro', 'bron', 'lund', 'sund']\n for suffix in efterled:\n n = suffix.split(',')\n n = n[0].split(' ')\n empty.extend(n)\n return empty\n\nefterled = scrapewiki('https://sv.wikipedia.org/wiki/Svenska_ortnamnsefterled')\n\nfor key, value in sammansattning.items():\n #print(value)\n #print(key)\n #if len([key]) > 1:\n if len(value) > 1:\n continue\n else: \n for suffix in efterled:\n if key.endswith(suffix):\n sammansattning[key] = [key.split(suffix)[0], suffix]\n continue\n\n#print(sammansattning)\n\n\n# for key, value in sammansatta.items():\n# #print(key)\n# if len([key]) > 1:\n# pass\n# else: \n# for suffix in efterled:\n# if key.endswith(suffix):\n# sammansatta[key] = [key.split(suffix)[0], suffix]\n# continue\n\n# print(sammansatta)\n\n#sammansatta = SALDO_sammansattning(total)\n\n# w = csv.writer(open(\"placenames.csv\", \"w\"))\n# for key, val in sammansattning.items():\n# w.writerow([key, val])\n\n\n","repo_name":"1noll1/LinsProjekt","sub_path":"SALDO_sammansattning.py","file_name":"SALDO_sammansattning.py","file_ext":"py","file_size_in_byte":3686,"program_lang":"python","lang":"sv","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"35642843252","text":"from google.appengine.ext import db\nfrom datetime import datetime\nfrom p12utils import *\nimport logging\n\nclass MasterModel(db.Model):\n\tpass\n\t# def toDict(self):\n\t# \treturn p12Encoder.encode(db.to_dict(self))\n\n\t# def save(self,params):\n\t# \tfor k,v in params.iteritems():\n\t# \t\tif k == 'id':continue;\n\t# \t\tsetattr(self,k,v)\n\t# \tself.put()\n\nclass User(MasterModel):\n\tessence=db.IntegerProperty(default=1000)\n\tnumslots = db.IntegerProperty(default=1)\n\tstatus = db.TextProperty(default='Playing Quicksilver!')\n\n\tdef toDict(self):\n\n\t\td = db.to_dict(self)\n\t\tcharacters = []\n\t\tfor character in self.toons:\n\t\t\tcharacters.append(db.to_dict(character))\n\t\td['characters'] = characters\n\t\treturn d\n\t\n\nclass Toon(MasterModel):\n\tskillpoints = db.IntegerProperty(default=10)\n\tname = db.StringProperty(required=True)\n\tlevel = db.IntegerProperty(default=1)\n\tgold = db.IntegerProperty(default=1000)\n\trace = db.StringProperty(required=True)\n\texp= db.IntegerProperty(default=0)\n\tgender=db.StringProperty(required=True)\n\tenergy=db.IntegerProperty(default=10)\n\tenergyMax=db.IntegerProperty(default=10)\n\tlastEnergyUpdate=db.DateTimeProperty()\n\trank=db.IntegerProperty(default=0)\n\n\tstrength = db.FloatProperty(default=1.0)\n\tintelligence = db.FloatProperty(default=1.0)\n\tdefense = db.FloatProperty(default=1.0)\n\tendurance = db.FloatProperty(default=1.0)\n\n\tuserid = db.StringProperty()\n\tguild = db.ReferenceProperty()\n\tuser = db.ReferenceProperty(User,collection_name='toons')\n\n\tdef toDict(self):\n\t\td = db.to_dict(self)\n\t\tquests = []\n\t\tfor quest in self.quests:\n\t\t\tquests.append(quest.toDict())\n\t\td['quests'] = quests\n\t\td['essence'] = self.user.essence\n\n\t\t# calculate amount of energy gained from last time this toon was queried\n\t\t\n\n\t\treturn d\n\n\nBUFF_STATE_ON = 1\nBUFF_STATE_PAUSED = 2\nBUFF_STATE_EXPIRED = 3\nclass ToonBuff(MasterModel):\n\tcharacter = db.ReferenceProperty(Toon,collection_name=\"buffs\")\n\tduration = db.DateTimeProperty()\n\tstarted = db.DateTimeProperty()\n\tname = db.StringProperty()\n\tdescription = db.TextProperty()\n\tstate = db.IntegerProperty(default=BUFF_STATE_ON)\n\nclass Equipment(MasterModel):\n\tcharcter = db.ReferenceProperty(Toon,collection_name='equipment_sets')\n\n\n\nclass Inventory(MasterModel):\n\tcharacter = db.ReferenceProperty(Toon,collection_name=\"inventories\")\n\tnumSlots = db.IntegerProperty(default=8)\n\n\tdef toDict(self):\n\t\td = db.to_dict(self)\n\t\titems = []\n\t\tfor item in db.query_descendants(self):\n\t\t\titems.append(item.toDict())\n\t\td['items'] = items\n\t\treturn d\n\nclass InventoryItem(MasterModel):\n\tname = db.StringProperty()\n\tprice = db.IntegerProperty()\n\tdescription= db.TextProperty()\n\ticonUrl = db.StringProperty()\n\n\tdef toDict(self):\n\t\td = db.to_dict(self)\n\t\td['id'] = self.key().id()\n\t\treturn d\n\nclass Quest(MasterModel):\n\tname = db.StringProperty()\n\tdescription = db.StringProperty()\n\tstepCurrent=db.IntegerProperty(default=0)\n\tstepTotal=db.IntegerProperty()\n\tcomplete = db.BooleanProperty(default=False)\n\tboss = db.BooleanProperty(default=False)\n\tloot = db.StringListProperty()\n\tgold = db.IntegerProperty()\n\tessence = db.IntegerProperty()\n\tenergy = db.IntegerProperty()\n\texp = db.IntegerProperty()\n\tcharacter = db.ReferenceProperty(Toon,collection_name='quests')\n\n\tdef createFromObject(self,details):\n\t\tself.name = details.name\n\t\tself.description = 
details.description\n\t\tself.stepTotal = details.steps\n\t\tself.gold = details.gold\n\t\tself.essence = details.essence\n\t\tself.energy = details.energy\n\t\tself.exp = details.exp\n\n\tdef toDict(self):\n\t\td= db.to_dict(self)\n\t\td['id'] = self.key().id()\n\t\treturn d\n\nclass NewsEntry(MasterModel):\n\ttitle = db.StringProperty()\n\tcontent = db.TextProperty()\n\tcreated = db.DateTimeProperty(auto_now_add=True)\n\timageUrl = db.StringProperty()\n\n\tdef toDict(self):\n\t\treturn db.to_dict(self)\n\nclass Guild(MasterModel):\n\tname = db.StringProperty()\n\tlevel = db.IntegerProperty(default=1)\n\treputation = db.IntegerProperty(default=1)\n\tmembers = db.StringListProperty()\n\n\tdef toDict(self):\n\t\td = db.to_dict(self)\n\t\treturn d\n\n\n\n","repo_name":"zanemx/quicksilver","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"71787723735","text":"import RPi.GPIO as GPIO\nfrom mfrc522 import SimpleMFRC522\n\n\nreader = SimpleMFRC522()\n\ntry:\n\ttext = input('Enter Driving Licence Number for new entry: ')\n\tprint('Now place your Licence to write......')\n\treader.write(text)\n\tprint('Data Successfully Written....')\n\nfinally:\n\tGPIO.cleanup()\n","repo_name":"El-Do-RaDo/RFID","sub_path":"Write.py","file_name":"Write.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
+{"seq_id":"20434269197","text":"# given unsorted linked list, remove all duplicates\n\n# Big O\n # Let N be the size of the linked_list\n # O(N) because we traverse the linked list just once\n # Each node we traverse we also attempt insert into\n # buffer hash table, which worst case would be O(N)\n # but average case is O(1) \n\nfrom linked_list import LinkedList\n\nlst = LinkedList()\n\nlst.insert(5)\nlst.insert(2)\nlst.insert(2)\nlst.insert(4)\nlst.insert(5)\nlst.insert(8)\n\ndef insert_buffer(buffer, value):\n val = str(value)\n if (val in buffer):\n return False\n buffer[val] = True\n return True\n\ndef remove_duplicates(linked_list):\n buffer = {}\n head = linked_list.head\n if (not head):\n return linked_list\n insert_buffer(buffer, head.value)\n nxt = head.next\n while(nxt):\n if (not insert_buffer(buffer, nxt.value)):\n head.next = nxt.next\n nxt = head.next\n else:\n head = nxt\n nxt = nxt.next\n\n\n\nprint(f'list before {lst}')\nremove_duplicates(lst)\nprint(f'list after {lst}')","repo_name":"jackhamby/cracking","sub_path":"chapter_2/2_1.py","file_name":"2_1.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"13247312811","text":"# and April Tag code from https://github.com/Kazuhito00/AprilTag-Detection-Python-Sample/blob/main/sample.py \r\n\r\nimport numpy as np\r\nimport cv2\r\n#from pupil_apriltags import Detector\r\nimport apriltag\r\nimport argparse\r\nimport copy\r\nimport time\r\n# import serial\r\n# from pathlib import Path\r\n# from xbee import XBee\r\n\r\n\r\ndef get_args():\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument(\"--device\", type=int, default=0)\r\n parser.add_argument(\"--width\", help='cap width', type=int, default=960)\r\n parser.add_argument(\"--height\", help='cap height', type=int, default=540)\r\n\r\n parser.add_argument(\"--families\", type=str, default='tag36h11')\r\n parser.add_argument(\"--nthreads\", type=int, default=1)\r\n parser.add_argument(\"--quad_decimate\", type=float, default=2.0)\r\n parser.add_argument(\"--quad_sigma\", type=float, default=0.0)\r\n parser.add_argument(\"--refine_edges\", type=int, default=1)\r\n parser.add_argument(\"--decode_sharpening\", type=float, default=0.25)\r\n parser.add_argument(\"--debug\", type=int, default=0)\r\n\r\n\r\n\r\n args = parser.parse_args()\r\n\r\n return args\r\n\r\n\r\ndef main():\r\n\t\r\n\t#initialization and open the port\r\n\t\r\n\t# ser = serial.Serial('/dev/ttyUSB0')\r\n\t# ser.baudrate = 9600\r\n\t# ser.bytesize = serial.EIGHTBITS #number of bits per bytes\r\n\t\r\n\t#exp.ser = serial.Serial('COM4', baudrate=9600, bytesize=serial.EIGHTBITS)\r\n\t#exp.ser.write(chr(1))\r\n\t#ser.parity = serial.PARITY_NONE #set parity check: no parity\r\n\t#ser.stopbits = serial.STOPBITS_ONE #number of stop bits\r\n \t#ser.timeout = None #block read\r\n\t#ser.timeout = 5 #non-block read\r\n \t#ser.timeout = 2 #timeout block read\r\n\t#ser.xonxoff = False #disable software flow control\r\n\t#ser.rtscts = False #disable hardware (RTS/CTS) flow control\r\n\t#ser.dsrdtr = False #disable hardware (DSR/DTR) flow control\r\n\t#print(ser.portstr)\r\n\t \r\n\t#print(\"We are not in the open\")\r\n\t\r\n\t#while (1):\r\n \t\t#if(ser.in_waiting > 0):\r\n \t#if ser.isOpen():\r\n \t\t#print(\"We are in the open\")\r\n \t\t#ser.write(5)\r\n \t\t\r\n \t\t#ser.write('Its Working')\r\n \t\t# read_data = ser.read(10)\r\n \t\t# response = ser.readline()\r\n \t\t#print(\"Data received : \")\r\n \t\t# ser.close()\r\n \t#else:\r\n \t\t#print (\"Can not open serial port\")\r\n \t\t# ser.open()\r\n\t\r\n\t#get args\r\n\targs = get_args()\r\n\tcap_device = args.device\r\n\tcap_width = args.width\r\n\tcap_height = args.height\r\n\tfamilies = args.families\r\n\tnthreads = args.nthreads\r\n\tquad_decimate = args.quad_decimate\r\n\tquad_sigma = args.quad_sigma\r\n\trefine_edges = args.refine_edges\r\n\tdecode_sharpening = args.decode_sharpening\r\n\tdebug = args.debug\r\n\r\n\t# Capturing video through webcam\r\n\t#webcam = cv2.VideoCapture(0)\r\n\ti = -1\r\n\twebcam = cv2.VideoCapture(i)\r\n\twhile not webcam.isOpened():\r\n \t\twebcam = cv2.VideoCapture(i)\r\n \t\ti = i + 1\r\n\tprint(i-1)\r\n\t\r\n\twebcam.set(cv2.CAP_PROP_FRAME_WIDTH, cap_width)\r\n\twebcam.set(cv2.CAP_PROP_FRAME_HEIGHT, cap_height)\r\n\r\n\t# Detector \r\n\toptions = apriltag.DetectorOptions(\r\n\t\tfamilies=families,\r\n\t\tnthreads=nthreads,\r\n\t\tquad_decimate=quad_decimate,\r\n\t\trefine_edges=refine_edges,\r\n\t\tdebug=debug\r\n\t)\r\n\t\r\n\tat_detector = apriltag.Detector()\r\n\telapsed_time = 0\r\n\t\r\n\t#every_four = 0;\r\n\r\n\t#serial_port = serial.Serial('/dev/ttyUSB4', 9600)\r\n\t#xbee2 = XBee(serial_port)\r\n\t# Start a while 
loop\r\n\twhile(1):\r\n\t\tstart_time = time.time()\r\n\r\n\t\t# Reading the video from the\r\n\t\t# webcam in image frames\r\n\t\tret, imageFrame = webcam.read()\r\n\t\tif not ret:\r\n\t\t\tbreak\r\n\r\n\t\tdebug_image = copy.deepcopy(imageFrame)\r\n\r\n\t\t# Convert the imageFrame in\r\n\t\t# BGR(RGB color space) to\r\n\t\t# HSV(hue-saturation-value)\r\n\t\t# color space\r\n\t\tgray = cv2.cvtColor(imageFrame, cv2.COLOR_BGR2GRAY)\r\n\t\thsvFrame = cv2.cvtColor(imageFrame, cv2.COLOR_BGR2HSV)\r\n\r\n\t\t#Declare coordinates\r\n\t\trobot1_x = 0\r\n\t\trobot1_y = 0\r\n\t\trobot2_x = 0\r\n\t\trobot2_y = 0\r\n\t\t\r\n\t\t# APRIL TAGS!!!\r\n\t\ttags = at_detector.detect(\r\n\t\t\tgray\r\n\t\t)\r\n\t\tkey = cv2.waitKey(1)\r\n\t\tif key == 27: # ESC\r\n\t\t\tbreak\r\n\t\t\r\n\t\tfor r in tags:\r\n\t\t\tif(r.tag_id == 0):\r\n\t\t\t\t#robot #1 coords \r\n\t\t\t\trobot1_x = int(r.center[0])\r\n\t\t\t\trobot1_y = int(r.center[1])\r\n\t\t\tif(r.tag_id == 1):\r\n\t\t\t\t#robot #2 coords \r\n\t\t\t\trobot2_x = int(r.center[0])\r\n\t\t\t\trobot2_y = int(r.center[1])\r\n\t\t\r\n\t\tdebug_image = draw_tags(debug_image, tags, elapsed_time)\r\n\t\telapsed_time = time.time() - start_time\r\n\t\tcv2.imshow('AprilTag Detect Demo', debug_image)\r\n\r\n\t\t# Program Termination\r\n\t\tcv2.imshow(\"Multiple Color Detection in Real-TIme\", imageFrame)\r\n\t\tif cv2.waitKey(10) & 0xFF == ord('q'):\r\n\t\t\tcap.release()\r\n\t\t\tcv2.destroyAllWindows()\r\n\t\t\tbreak\r\n\t\t\t\r\n\t\t# #transmit here!\r\n\t\t# robot1_x_upper, robot1_x_lower = (robot1_x & 0xFFFFFFFF).to_bytes(2, 'big')\r\n\t\t# robot1_y_upper, robot1_y_lower = (robot1_y & 0xFFFFFFFF).to_bytes(2, 'big')\r\n\t\t# robot1_x_final = (robot1_x_lower>>2) | ((robot1_x_upper& 0b11 )<<6)\r\n\t\t# robot1_y_final = (robot1_y_lower>>2) | ((robot1_y_upper& 0b11 )<<6)\r\n\t\t\r\n\t\t# robot2_x_upper, robot2_x_lower = (robot2_x & 0xFFFFFFFF).to_bytes(2, 'big')\r\n\t\t# robot2_y_upper, robot2_y_lower = (robot2_y & 0xFFFFFFFF).to_bytes(2, 'big')\r\n\t\t# robot2_x_final = (robot2_x_lower>>2) | ((robot2_x_upper& 0b11 )<<6)\r\n\t\t# robot2_y_final = (robot2_y_lower>>2) | ((robot2_y_upper& 0b11 )<<6)\r\n\t\t\r\n\t\t# final = [robot1_x_final, robot1_y_final,\r\n\t\t# \t\trobot2_x_final, robot2_y_final]\r\n\t\t\t\r\n\t\t# finalTx = bytes(final)\r\n\t\t\r\n\t\t# ser.write(finalTx) #we transmitted serially over GPIO for EECS 373\r\n\r\n\r\n\t\t#finalTx = str(finalTx)\r\n\t\t\r\n\t\t#print(len(testArr))\r\n\t\t#print(final)\r\n\t\t#xbee2.write(finalTx)\r\n\t\t#response = xbee2.wait_read_frame()\r\n\t\t\r\n\t\t#print(final)\r\n \t\t\r\n\t\t#print(ser.read())\r\n\t\t#if(every_four == 5):\r\n\t\t#\tser.write(testArr)\r\n\t\t#\tevery_four = 0\r\n\t\t#else:\r\n\t\t#\tevery_four = every_four + 1\r\n\t\t\r\n\t\t\r\n\t\r\n\r\ndef draw_tags(\r\n image,\r\n tags,\r\n elapsed_time,\r\n):\r\n for tag in tags:\r\n tag_family = tag.tag_family\r\n tag_id = tag.tag_id\r\n center = tag.center\r\n corners = tag.corners\r\n\r\n center = (int(center[0]), int(center[1]))\r\n corner_01 = (int(corners[0][0]), int(corners[0][1]))\r\n corner_02 = (int(corners[1][0]), int(corners[1][1]))\r\n corner_03 = (int(corners[2][0]), int(corners[2][1]))\r\n corner_04 = (int(corners[3][0]), int(corners[3][1]))\r\n\r\n cv2.circle(image, (center[0], center[1]), 5, (0, 0, 255), 2)\r\n\r\n cv2.line(image, (corner_01[0], corner_01[1]),\r\n (corner_02[0], corner_02[1]), (255, 0, 0), 2)\r\n cv2.line(image, (corner_02[0], corner_02[1]),\r\n (corner_03[0], corner_03[1]), (255, 0, 0), 2)\r\n cv2.line(image, (corner_03[0], 
corner_03[1]),\r\n (corner_04[0], corner_04[1]), (0, 255, 0), 2)\r\n cv2.line(image, (corner_04[0], corner_04[1]),\r\n (corner_01[0], corner_01[1]), (0, 255, 0), 2)\r\n\r\n # cv2.putText(image,\r\n # str(tag_family) + ':' + str(tag_id),\r\n # (corner_01[0], corner_01[1] - 10), cv2.FONT_HERSHEY_SIMPLEX,\r\n # 0.6, (0, 255, 0), 1, cv2.LINE_AA)\r\n cv2.putText(image, str(tag_id), (center[0] - 10, center[1] - 10),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2, cv2.LINE_AA)\r\n\r\n cv2.putText(image,\r\n \"Elapsed Time:\" + '{:.1f}'.format(elapsed_time * 1000) + \"ms\",\r\n (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2,\r\n cv2.LINE_AA)\r\n\r\n return image\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"EmiliaPsacharopoulos/multi-agent-collision-avoidance","sub_path":"ComputerVision_EECS467.py","file_name":"ComputerVision_EECS467.py","file_ext":"py","file_size_in_byte":7515,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
+{"seq_id":"11013698281","text":"\"\"\"Unit tests for //labm8/app.\"\"\"\nimport pathlib\n\nfrom absl import flags as absl_flags\n\nfrom labm8.py import app\nfrom labm8.py import app_test_flags\nfrom labm8.py import test\n\nFLAGS = app_test_flags.FLAGS\n\nMODULE_UNDER_TEST = None\n\n\ndef test_string_flag():\n FLAGS.unparse_flags()\n FLAGS([\"argv[0]\", \"--string_flag\", \"Hello, world!\"])\n assert FLAGS.string_flag == \"Hello, world!\"\n\n\ndef test_output_path_flag(tempdir: pathlib.Path):\n FLAGS.unparse_flags()\n FLAGS([\"argv[0]\", \"--output_path_flag\", str(tempdir / \"file\")])\n assert FLAGS.output_path_flag == pathlib.Path(tempdir / \"file\")\n\n\ndef test_int_flag_validator():\n FLAGS.unparse_flags()\n FLAGS([\"argv[0]\", \"--int_flag_with_validator\", \"2\"])\n with test.Raises(absl_flags.IllegalFlagValueError):\n FLAGS([\"argv[0]\", \"--int_flag_with_validator\", \"-1\"])\n\n\ndef test_database_flag(tempdir: pathlib.Path):\n FLAGS.unparse_flags()\n url = f\"sqlite:///{tempdir}/db\"\n FLAGS([\"argv[0]\", \"--database_flag\", url])\n # The database isn't created until the flag value is called.\n assert not (tempdir / \"db\").is_file()\n assert FLAGS.database_flag().url == url\n assert (tempdir / \"db\").is_file()\n\n\ndef test_FlagsToDict():\n flags_dict = app.FlagsToDict()\n assert \"absl.logging.alsologtostderr\" in flags_dict\n assert flags_dict[\"absl.logging.alsologtostderr\"] in {True, False}\n\n\ndef test_FlagsToString():\n flags_str = app.FlagsToString()\n assert \"--noalsologtostderr\" in flags_str or \"--alsologtostderr\" in flags_str\n\n\nif __name__ == \"__main__\":\n test.Main()\n","repo_name":"ChrisCummins/phd","sub_path":"labm8/py/app_test.py","file_name":"app_test.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":181,"dataset":"github-code","pt":"67"}
+{"seq_id":"26041888930","text":"# import libraries here\nfrom __future__ import print_function\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport collections\n\n# keras\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Activation\nfrom keras.optimizers import SGD\n\nimport matplotlib.pylab as pylab\n\n\ndef load_image(path):\n return cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)\n\n\ndef image_gray(image):\n return cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\n\ndef image_bin(image_gs):\n height, width = image_gs.shape[0:2]\n image_binary = np.ndarray((height, width), dtype=np.uint8)\n ret, image_bin = cv2.threshold(image_gs, 200, 255, cv2.THRESH_BINARY)\n display_image(image_bin)\n # image_bin = cv2.adaptiveThreshold(image_gs, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)\n return image_bin\n\n\ndef invert(image):\n return 255 - image\n\n\ndef display_image(image, color=False):\n if color:\n plt.imshow(image)\n plt.show()\n else:\n plt.imshow(image, 'gray')\n plt.show()\n\n\ndef dilate(image):\n kernel = np.ones((3, 3)) # strukturni element 3x3 blok\n return cv2.dilate(image, kernel, iterations=1)\n\n\ndef erode(image):\n kernel = np.ones((3, 3)) # strukturni element 3x3 blok\n return cv2.erode(image, kernel, iterations=1)\n\n\ndef resize_region(region):\n '''Transformisati selektovani region na sliku dimenzija 28x28'''\n return cv2.resize(region, (28, 28), interpolation=cv2.INTER_NEAREST)\n\n\ndef isAlreadyAdded(regions_array_filtered, x):\n for region in regions_array_filtered:\n if region[1][0] == x:\n return True\n\n return False\n\n\ndef select_roi(image_orig, image_bin):\n '''Oznaciti regione od interesa na originalnoj slici. (ROI = regions of interest)\n Za svaki region napraviti posebnu sliku dimenzija 28 x 28.\n Za označavanje regiona koristiti metodu cv2.boundingRect(contour).\n Kao povratnu vrednost vratiti originalnu sliku na kojoj su obeleženi regioni\n i niz slika koje predstavljaju regione sortirane po rastućoj vrednosti x ose\n '''\n img, contours, hierarchy = cv2.findContours(image_bin.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n sorted_regions = [] # lista sortiranih regiona po x osi (sa leva na desno)\n regions_array = []\n for i in range(len(contours)):\n x, y, w, h = cv2.boundingRect(contours[i]) # koordinate i velicina granicnog pravougaonika\n area = cv2.contourArea(contours[i])\n if h > 10 and w > 10 and hierarchy[0, i, 3] == -1:\n # kopirati [y:y+h+1, x:x+w+1] sa binarne slike i smestiti u novu sliku\n # označiti region pravougaonikom na originalnoj slici (image_orig) sa rectangle funkcijom\n region = image_bin[y:y + h + 1, x:x + w + 1]\n regions_array.append([resize_region(region), (x, y, w, h)])\n # cv2.rectangle(image_orig, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n # sortirati sve regione po x osi (sa leva na desno) i smestiti u promenljivu sorted_regions\n regions_array = sorted(regions_array, key=lambda item: item[1][0])\n\n regions_array_filtered = []\n\n for region in regions_array:\n x1 = region[1][0]\n y1 = region[1][1]\n w1 = region[1][2]\n h1 = region[1][3]\n found = False\n for smaller_region in regions_array:\n x2 = smaller_region[1][0]\n y2 = smaller_region[1][1]\n w2 = smaller_region[1][2]\n h2 = smaller_region[1][3]\n if x2 > x1 and x2 + w2 < x1 + w1:\n found = True\n x = x1\n y = y2\n w = w1\n h = h1 + h2\n if not isAlreadyAdded(regions_array_filtered, x):\n cutout = image_bin[y:y + h + 1, x:x + w + 1]\n regions_array_filtered.append([resize_region(cutout), (x, y, w, h)])\n 
cv2.rectangle(image_orig, (x, y), (x + w, y + h), (255, 0, 0), 2)\n elif x2 < x1 and x2 + w2 > x1 + w1:\n found = True\n x = x2\n y = y1\n w = w2\n h = h1 + h2\n if not isAlreadyAdded(regions_array_filtered, x):\n cutout = image_bin[y:y + h + 1, x:x + w + 1]\n regions_array_filtered.append([resize_region(cutout), (x, y, w, h)])\n cv2.rectangle(image_orig, (x, y), (x + w, y + h), (255, 0, 0), 2)\n elif x1 < x2 < x1+w1:\n x = x1\n y= y2\n w = 2*w2 - w1\n h = h1 + h2\n if not isAlreadyAdded(regions_array_filtered, x):\n cutout = image_bin[y:y + h + 1, x:x + w + 1]\n regions_array_filtered.append([resize_region(cutout), (x, y, w, h)])\n cv2.rectangle(image_orig, (x, y), (x + w, y + h), (0, 0, 0), 2)\n if not found:\n cutout = image_bin[y1:y1 + h1 + 1, x1:x1 + w1 + 1]\n regions_array_filtered.append([resize_region(cutout), (x1, y1, w1, h1)])\n cv2.rectangle(image_orig, (x1, y1), (x1 + w1, y1 + h1), (0, 255, 0), 2)\n\n sorted_regions = [region[0] for region in regions_array_filtered]\n\n return image_orig, sorted_regions\n\n\ndef scale_to_range(image): # skalira elemente slike na opseg od 0 do 1\n ''' Elementi matrice image su vrednosti 0 ili 255.\n Potrebno je skalirati sve elemente matrica na opseg od 0 do 1\n '''\n return image / 255\n\n\ndef matrix_to_vector(image):\n '''Sliku koja je zapravo matrica 28x28 transformisati u vektor sa 784 elementa'''\n return image.flatten()\n\n\ndef prepare_for_ann(regions):\n '''Regioni su matrice dimenzija 28x28 čiji su elementi vrednosti 0 ili 255.\n Potrebno je skalirati elemente regiona na [0,1] i transformisati ga u vektor od 784 elementa '''\n ready_for_ann = []\n for region in regions:\n # skalirati elemente regiona\n # region sa skaliranim elementima pretvoriti u vektor\n # vektor dodati u listu spremnih regiona\n scale = scale_to_range(region)\n ready_for_ann.append(matrix_to_vector(scale))\n\n return ready_for_ann\n\n\ndef convert_output(alphabet):\n '''Konvertovati alfabet u niz pogodan za obučavanje NM,\n odnosno niz čiji su svi elementi 0 osim elementa čiji je\n indeks jednak indeksu elementa iz alfabeta za koji formiramo niz.\n Primer prvi element iz alfabeta [1,0,0,0,0,0,0,0,0,0],\n za drugi [0,1,0,0,0,0,0,0,0,0] itd..\n '''\n nn_outputs = []\n for index in range(len(alphabet)):\n output = np.zeros(len(alphabet))\n output[index] = 1\n nn_outputs.append(output)\n return np.array(nn_outputs)\n\n\ndef create_ann():\n '''Implementacija veštačke neuronske mreže sa 784 neurona na uloznom sloju,\n 128 neurona u skrivenom sloju i 10 neurona na izlazu. 
Aktivaciona funkcija je sigmoid.\n '''\n ann = Sequential()\n ann.add(Dense(128, input_dim=784, activation='sigmoid'))\n ann.add(Dense(30, activation='sigmoid'))\n return ann\n\n\ndef train_ann(ann, X_train, y_train):\n '''Obucavanje vestacke neuronske mreze'''\n X_train = np.array(X_train, np.float32) # dati ulazi\n y_train = np.array(y_train, np.float32) # zeljeni izlazi za date ulaze\n\n # definisanje parametra algoritma za obucavanje\n sgd = SGD(lr=0.01, momentum=0.9)\n ann.compile(loss='mean_squared_error', optimizer=sgd)\n\n # obucavanje neuronske mreze\n ann.fit(X_train, y_train, epochs=500, batch_size=1, verbose=0, shuffle=False)\n\n return ann\n\ndef winner(output): # output je vektor sa izlaza neuronske mreze\n \"\"\"pronaći i vratiti indeks neurona koji je najviše pobuđen\"\"\"\n return max(enumerate(output), key=lambda x: x[1])[0]\n\ndef display_result(outputs, alphabet):\n '''za svaki rezultat pronaći indeks pobedničkog\n regiona koji ujedno predstavlja i indeks u alfabetu.\n Dodati karakter iz alfabet u result'''\n result = []\n for output in outputs:\n result.append(alphabet[winner(output)])\n return result\n\ndef train_or_load_character_recognition_model(train_image_paths, serialization_folder):\n \"\"\"\n Procedura prima putanje do fotografija za obucavanje (dataset se sastoji iz razlicitih fotografija alfabeta), kao i\n putanju do foldera u koji treba sacuvati model nakon sto se istrenira (da ne trenirate svaki put iznova)\n\n Procedura treba da istrenira model i da ga sacuva u folder \"serialization_folder\" pod proizvoljnim nazivom\n\n Kada se procedura pozove, ona treba da trenira model ako on nije istraniran, ili da ga samo ucita ako je prethodno\n istreniran i ako se nalazi u folderu za serijalizaciju\n\n :param train_image_paths: putanje do fotografija alfabeta\n :param serialization_folder: folder u koji treba sacuvati serijalizovani model\n :return: Objekat modela\n \"\"\"\n # TODO - Istrenirati model ako vec nije istreniran, ili ga samo ucitati iz foldera za serijalizaciju\n ann = create_ann()\n\n for i in range(len(train_image_paths)):\n image_color = load_image(train_image_paths[i])\n display_image(image_color)\n img = invert(image_bin(image_gray(image_color)))\n display_image(img)\n img_bin = erode(dilate(img))\n display_image(img_bin)\n selected_regions, numbers = select_roi(image_color.copy(), img)\n display_image(selected_regions)\n # if i == 0:\n # alphabet = ['A', 'B', 'C', 'Č', 'Ć', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q',\n # 'R','S', 'Š', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'Ž']\n # else:\n # alphabet = ['a', 'b', 'c', 'č', 'ć', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',\n # 'r', 's', 'š', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'ž']\n #\n # inputs = prepare_for_ann(numbers)\n # outputs = convert_output(alphabet)\n #\n # ann = train_ann(ann, inputs, outputs)\n\n # model = None\n # return model\n\n return ann\n\n\ndef extract_text_from_image(trained_model, image_path, vocabulary):\n \"\"\"\n Procedura prima objekat istreniranog modela za prepoznavanje znakova (karaktera), putanju do fotografije na kojoj\n se nalazi tekst za ekstrakciju i recnik svih poznatih reci koje se mogu naci na fotografiji.\n Procedura treba da ucita fotografiju sa prosledjene putanje, i da sa nje izvuce sav tekst koriscenjem\n openCV (detekcija karaktera) i prethodno istreniranog modela (prepoznavanje karaktera), i da vrati procitani tekst\n kao string.\n\n Ova procedura se poziva automatski iz main procedure pa nema potrebe 
dodavati njen poziv u main.py\n\n :param trained_model: Istrenirani model za prepoznavanje karaktera\n :param image_path: Putanja do fotografije sa koje treba procitati tekst.\n :param vocabulary: Recnik SVIH poznatih reci i ucestalost njihovog pojavljivanja u tekstu\n :return: Tekst procitan sa ulazne slike\n \"\"\"\n extracted_text = \"\"\n # TODO - Izvuci tekst sa ulazne fotografije i vratiti ga kao string\n\n return extracted_text\n","repo_name":"david-drvar/soft-computing","sub_path":"sc_2020_challenge_2/iterations/process 0.py","file_name":"process 0.py","file_ext":"py","file_size_in_byte":11128,"program_lang":"python","lang":"sh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"8277718038","text":"from lang.lib_6_run import *\nfrom tests.test import *\nfrom utils.arg_parser import *\nfrom utils.flattap import *\nfrom utils.print_if import *\nfrom utils.read_file import *\n\n\"\"\"\nSteps:\n...\n\nShortcuts:\n...\n\"\"\"\n\n\ndef get_runner(dev):\n def run(action, value, header, show_res=True):\n print_if(dev)(header)\n return flattap(lambda: action(value),\n lambda res: print_if(dev and show_res)(res, end=\"\\n\\n\")\n )\n\n return run\n\n\ndef unsafe_run_code(code, dev):\n run = get_runner(dev)\n print_if(dev)(\"1_CODE\")\n print_if(dev)(code)\n try:\n # desugared = run(desugar, code, \"2_DESUGARED\") # 231108\n tokens = run(tokenize, code, \"3_TOKENS\")\n expr = run(parse, tokens, \"4_EXPR\")\n typed = run(typify, expr, \"5_TYPED\")\n shown = run(build_str_py, typed, \"6_SHOWN\")\n run(unsafe_run_built, build_py(shown), \"7_RUNNING\",\n show_res=False\n )\n except Exception as e:\n print(e)\n\n\ndef main():\n args = argParser.parse(get_args())\n if args.test:\n run_tests()\n elif args.code:\n unsafe_run_code(code=read_file(args.code), dev=args.dev)\n\n\nargParser = (\n ArgParser()\n .add(\"code\", nargs=\"?\")\n .add(\"--dev\", action=\"store_true\")\n .add(\"--test\", action=\"store_true\")\n)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"tebrobert/rt_lang","sub_path":"rt.py","file_name":"rt.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"9413955205","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport datetime\n\nimport mwclient\n\nimport cnf\n\ndef main():\n creds = {'protocol': cnf.protocol,\n 'site': cnf.site,\n 'useragent': cnf.useragent,\n 'username': cnf.username,\n 'password': cnf.password}\n site = login(creds)\n\n # get data for scoreboard\n new_participants = get_new_participant_count(site)\n inspire_idea_count = get_inspire_idea_count(site)\n days_left = calculate_days_left()\n\n # fill in template with data\n text_to_post = format_template(inspire_idea_count, new_participants,\n days_left)\n # post updated template\n update_templates(text_to_post, site)\n\n\ndef login(creds):\n \"\"\"Initialize mwclient Site and log in.\"\"\"\n site = mwclient.Site((creds['protocol'], creds['site']),\n clients_useragent=creds['useragent'])\n site.login(creds['username'], creds['password'])\n return site\n\n\ndef get_new_participant_count(site):\n \"\"\"Get the count of all logged-in contributors to pages in the main\n Inspire namespace and to their associated talk pages, if any.\n \"\"\"\n page_ids = get_page_ids(site)\n page_ids.extend(get_talk_page_ids(page_ids, site))\n participants = get_participants(page_ids, site)\n return len(set(participants))\n\n\ndef get_page_ids(site):\n \"\"\"Get a list of strings containing the pageids of all members of\n Category:IdeaLab/Ideas/Inspire and Category:Inspire campaign.\"\"\"\n categories = ['Category:IdeaLab', 'Category:IdeaLab/Ideas/Inspire/Measuring_community_health', 'Category:Inspire_campaign']\n pageids = []\n for category in categories:\n kwargs = {'action': 'query',\n 'list': 'categorymembers',\n 'cmtitle': category,\n 'cmlimit': 'max'}\n response = site.api(**kwargs)\n for result in response['query']['categorymembers']:\n pageids.append(str(result['pageid']))\n return pageids\n\n\ndef get_participants(page_ids, site):\n \"\"\"Given a list of pageids, get the people who have edited them\n since 09 July 2018.\"\"\"\n contributors = []\n for page_id in page_ids:\n response = site.api(action='query', prop='revisions', rvstart='2018-07-01T00:00:00Z', rvlimit='max', pageids=page_id, rvprop='userid', rvdir='newer')\n revisions = response['query']['pages'][page_id].get('revisions')\n if revisions:\n for revision in revisions:\n contributors.append(revision['userid'])\n return contributors\n\n\ndef get_talk_page_ids(page_ids, site):\n \"\"\"Given a list of page ids, get a list of page ids of the\n corresponding talk pages (if they exist).\"\"\"\n page_ids_string = '|'.join(page_ids)\n response = site.api(action='query',\n prop='info',\n inprop='talkid',\n pageids=page_ids_string)\n pages = response['query']['pages']\n talk_page_ids = []\n for page in pages:\n if pages[page]['ns'] == 200 and pages[page].get('talkid'):\n talk_page_ids.append(str(pages[page]['talkid']))\n return talk_page_ids\n\n\ndef get_inspire_idea_count(site):\n \"\"\"Get the number of pages in Category:IdeaLab/Ideas/Inspire, and\n correct for the three that are templates, not ideas.\"\"\"\n response = site.api(action='query',\n prop='categoryinfo',\n titles='Category:IdeaLab/Ideas/Inspire/Measuring_community_health')\n page_count = parse_idea_count_response(response)\n\n # don't count probox, etc\n # actual_page_count = page_count - 3\n return page_count\n\n\ndef parse_idea_count_response(response):\n for page in response['query']['pages']:\n page_count = response['query']['pages'][page]['categoryinfo']['pages']\n return page_count\n\n\ndef calculate_days_left():\n \"\"\"Calculate the number of 
days until August 09, 2018. If the date\n has passed, return 0.\"\"\"\n ending_date = datetime.date(2018, 8, 9)\n days_left = (ending_date - datetime.date.today()).days\n if days_left >= 0:\n return days_left\n else:\n return 0\n\n\ndef format_template(ideas, participants, days_left):\n \"\"\"Put the collected data in the template.\"\"\"\n filled_template = '{{{{IdeaLab/Inspire/Scoreboard\\n|ideas= {}\\n|'\\\n 'participants= {}\\n|'\\\n 'days_left= {}\\n}}}}'.format(ideas, participants,\n days_left)\n return filled_template\n\n\ndef update_templates(text_to_post, site):\n \"\"\"\"\"\"\n scoreboard = site.Pages['Grants:IdeaLab/Inspire/Scoreboard']\n response = scoreboard.save(text_to_post,\n summary='Automatic scoreboard update')\n return response\n\n\nif __name__=='__main__':\n main()\n\n","repo_name":"jtmorgan/grantsbot","sub_path":"inspire/scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":4834,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"}
+{"seq_id":"5606499760","text":"class demo:\n count=0 #here count is a static /class variable \n def __init__(self):\n self.value=1 #object/instatnce/nonstatic variable\n\ndemo.count=1\nprint(demo.count) #class variable can be accesed using class name and can be cahngd by class variable \nd=demo()\nd.value=0\nprint(d.value) #non-static variable or instance can be accessed by object name and can be cahnged by object name\n\n\n\n","repo_name":"UzairIshfaq1234/python_programming_PF_OOP_GUI","sub_path":"static vaiab.py","file_name":"static vaiab.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"71662959254","text":"from .task import task\n\n\nclass stagego(task):\n\n def __init__(self,\n dx=-100,\n speed=1,\n **kwargs):\n super(stagego, self).__init__(**kwargs)\n self.dx = dx\n self.speed = speed\n self.nframes = 10\n\n def initialize(self, frame):\n self.wstage = self.parent.wstage\n self.stage = self.wstage.instrument\n self.position = self.stage.position()\n self.stage.setMaxSpeed(self.speed)\n self.goal = self.position[0] + self.dx\n self.stage.moveX(self.goal)\n\n def doprocess(self, frame):\n if self.stage.stageMoving():\n self.nframes = 2\n\n def dotask(self):\n self.stage.reset()\n","repo_name":"davidgrier/pyfab","sub_path":"tasks/obsolete/stagego.py","file_name":"stagego.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"}
+{"seq_id":"4391636511","text":"# https://www.acmicpc.net/problem/1158\n\ndef solve():\n import sys\n N, K = map(int, sys.stdin.readline().split())\n num_list = [x for x in range(1, N+1)]\n result = []\n idx = 0\n \n for i in range(N):\n idx += (K-1)\n if idx >= N:\n idx %= N\n result.append(str(num_list.pop(idx)))\n N -= 1\n\n print(\"<\",\", \".join(result)[:],\">\", sep='')\n\nif __name__ == \"__main__\":\n solve()","repo_name":"Carrotww/Carrot_Algorithm","sub_path":"2023/23_04/2023_04_21_백준_요세푸스.py","file_name":"2023_04_21_백준_요세푸스.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"30264282402","text":"from django.conf import settings\r\nfrom django.db import models\r\nfrom django.db.models.signals import post_save\r\nfrom django.utils import timezone\r\n\r\nCategory_CHOICE = (\r\n ('s','shirt'),\r\n ('sw','sport wear'),\r\n ('ow','outwear'),\r\n)\r\n\r\nLabel_CHOICE = (\r\n ('e','No Label'),\r\n ('p','new'),\r\n ('d','best seller'),\r\n)\r\n\r\nclass Item(models.Model):\r\n title = models.CharField(max_length=100)\r\n price = models.FloatField()\r\n discount_price = models.FloatField(blank=True,null=True)\r\n category = models.CharField(choices=Category_CHOICE,max_length=2)\r\n label = models.CharField(choices=Label_CHOICE,max_length=1)\r\n description = models.CharField(max_length=400)\r\n extra_info = models.TextField(True,null=True)\r\n image = models.ImageField(upload_to=\"images\")\r\n\r\n def __str__(self):\r\n return self.title\r\n\r\nclass OrderItem(models.Model):\r\n item = models.ForeignKey(Item,on_delete=models.CASCADE)\r\n quantity = models.IntegerField(default=1)\r\n user = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)\r\n ordered = models.BooleanField(default=False)\r\n\r\n def __str__(self):\r\n return self.user.username+\" Will buy \"+str(self.quantity) +\" OF \"+ self.item.title\r\n\r\n def total_price(self):\r\n if(self.item.discount_price):\r\n return self.item.discount_price * self.quantity\r\n return self.quantity*self.item.price;\r\n\r\n\r\nclass Order(models.Model):\r\n user = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)\r\n items = models.ManyToManyField(OrderItem)\r\n start_date = models.DateTimeField(auto_now_add=True)\r\n ordered_date = models.DateTimeField(auto_now_add=True)\r\n ordered = models.BooleanField(default=False)\r\n address = models.ForeignKey('Address',on_delete=models.SET_NULL,null=True,blank=True)\r\n payment = models.ForeignKey('Payment',on_delete=models.SET_NULL,null=True,blank=True)\r\n cupon = models.ForeignKey('Cupon',on_delete=models.SET_NULL,null=True,blank=True)\r\n being_delivered = models.BooleanField(default=False)\r\n received = models.BooleanField(default=False)\r\n\r\n def __str__(self):\r\n return self.user.username\r\n\r\n\r\n def cupon_discount(self):\r\n if(self.cupon):\r\n return self.actual_price()*(self.cupon.persent/100)\r\n return 0\r\n\r\n def actual_price(self):\r\n m=0;\r\n for i in self.items.all():\r\n m+=i.total_price()\r\n return m\r\n\r\n def total_price(self):\r\n return self.actual_price() - self.cupon_discount()\r\n\r\n\r\nclass Address(models.Model):\r\n user = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)\r\n street_address = models.CharField(max_length=100)\r\n apartment_address = models.CharField(max_length=100)\r\n country = models.CharField(max_length=100)\r\n Zip = models.CharField(max_length=100)\r\n default = models.BooleanField(default=False)\r\n\r\n def __str__(self):\r\n return \"{},{},{}\".format(self.street_address,self.apartment_address,self.country)\r\n\r\nclass Payment(models.Model):\r\n user = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)\r\n amount = models.FloatField()\r\n timestamp = models.DateTimeField(auto_now_add=True)\r\n stripe_charge_id = models.CharField(max_length=30,null=True,blank=True)\r\n paypal_charge_id = models.CharField(max_length=30,null=True,blank=True)\r\n\r\n def __str__(self):\r\n return \"{} : ({})\".format(self.user.username,self.amount)\r\n\r\nclass Cupon(models.Model):\r\n code = models.CharField(max_length=100)\r\n persent = 
models.IntegerField()\r\n num_times = models.IntegerField()\r\n valid = models.BooleanField(default=True)\r\n Global = models.BooleanField(default=True)\r\n\r\n def __str__(self):\r\n return self.code\r\n\r\n\r\nclass User_cupon(models.Model):\r\n user = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)\r\n cupon = models.ForeignKey(\"Cupon\",on_delete=models.CASCADE)\r\n times_used = models.IntegerField(default=0)\r\n\r\n\r\n\r\n def usable(self):\r\n return self.times_used None:\n self.p = p \n self.q = q \n self.r = r\n self.dom = dom\n self.arb,self.brb = (1,0,0),(1,0,0) #default boundary conditions\n self.exact = exact\n pass\n\n def discretize(self,N:int):\n self.N = N \n self.ddom = np.linspace(self.dom[0],self.dom[1],N+1)\n self.w = np.zeros(self.ddom.shape) \n #ddom,w = [0,1,2....N-1,N]\n self.h = abs(np.diff(self.ddom)[0])\n h,x = self.h,self.ddom\n self.d = vec(lambda i : 2 + h**2*self.p(x[i]))\n self.u = vec(lambda i : -1 + h/2*self.q(x[i]))\n self.l = vec(lambda i : -1 - h/2*self.q(x[i]))\n self.b = vec(lambda i : -h**2*self.r(x[i])) \n return self.ddom\n \n def set_dirichlet(self,a,b):\n self.set_robin((1,0,a),(1,0,b))\n\n def set_neumann(self,a,b):\n self.set_robin((0,1,a),(0,1,b))\n \n def set_robin(self,a=None,b=None): # a1 y(0) + a2 y'(0) = a3 ; b1 y(N) + b2y'(N) = b3 \n if a == (0,0,0) or b==(0,0,0):\n raise ValueError(\"Give appropriate Boundary conditions, (0,0,0) is nonsense.\")\n if a is not None:\n self.arb = a\n if b is not None:\n self.brb = b\n (a1,a2,a3),(b1,b2,b3) = self.arb,self.brb\n h,x = self.h,self.ddom\n b_,d,l,u,N = self.b,self.d,self.l,self.u,self.N\n if a2 == 0 :\n self.a11,self.a12 = 1,0\n self.b1 = a3/a1\n else:\n self.a11,self.a12 = d(0) + 2*h*l(0)*a1/a2,-2\n self.b1 = b_(0)+2*h*l(0)*a3/a2\n if b2 == 0 :\n self.ann,self.an_1n = 1,0\n self.bn = b3/b1\n else:\n self.ann,self.an_1n = d(N) - 2*h*u(N)*b1/b2 ,-2\n self.bn = b_(N)-2*h*l(N)*b3/b2\n \n def get_A_b(self):\n ii = np.arange(1,self.N)\n l_,d_ = np.zeros(self.N),np.zeros(self.N+1)\n u_,self.b_ = l_.copy(),d_.copy()\n l_[:-1],l_[-1] = self.l(ii), self.an_1n \n u_[1:],u_[0] = self.u(ii), self.a12 \n d_[1:-1],d_[0],d_[-1] = self.d(ii),self.a11,self.ann\n self.b_[1:-1],self.b_[0],self.b_[-1] = self.b(ii),self.b1,self.bn \n self.A = get_tridiag(l_,d_,u_)\n return self.A,self.b_\n\n def solve(self):\n A,b =self.get_A_b()\n soln = np.linalg.solve(A,b)\n anasoln = self.exact(self.ddom)\n rmse = np.sqrt((soln-anasoln)**2/self.N) \n return soln,rmse\n\n def plot_exact(self,ax):\n x_space = np.linspace(*self.dom)\n ax.plot(x_space,self.exact(x_space),label=\"Exact\")\n def plot_num(self,ax):\n ax.plot(self.ddom,np.linalg.solve(self.A,self.b_),\"1\",label=f\"$N={self.N}$\")\n \n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n from matplotlib import use\n plt.style.use(\"bmh\")\n bvp1 = ordinary_bvp(lambda x: np.pi**2,lambda x:0,lambda x:-2*np.pi**2*np.sin(np.pi*x),(0,1),lambda x : np.sin(np.pi*x))\n bvp1.discretize(5)\n bvp1.set_robin((1,0,0),(1,0,0))\n A,b = bvp1.get_A_b()\n \n fig1,ax1 = plt.subplots(1,1)\n bvp1.plot_exact(ax1)\n bvp1.plot_num(ax1)\n plt.legend()\n plt.show()\n\n bvp2 = ordinary_bvp(lambda x: -1,lambda x:0,lambda x:np.sin(3*x),(0,np.pi/2))\n bvp2.discretize(5)\n bvp2.set_robin((1,1,-1),(0,1,1))\n y2_exact = lambda x : 3/8*np.sin(x) - np.cos(x) - 1/8*np.sin(3*x) \n A,b = bvp2.get_A_b()\n print(A)\n 
print(b)","repo_name":"Dirac-sn/Assignment-11","sub_path":"shash_a11.py","file_name":"shash_a11.py","file_ext":"py","file_size_in_byte":3821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"23482698249","text":"#!/bin/python3\n\n#https://www.hackerrank.com/contests/projecteuler/challenges/euler002\n\ndef evenFibonacci(n):\n a=1\n b=2\n ans=0\n mode=1\n while 1:\n if mode:\n if a>n:\n return ans\n if a %2==0:\n ans+=a\n a=a+b\n mode=0\n else: \n if b>n:\n return ans\n if b %2==0:\n ans+=b\n b=a+b\n mode=1\n \nt = int(input().strip())\nfor _ in range(t):\n n = int(input().strip())\n print(evenFibonacci(n))\n","repo_name":"ntsd/my-solutions","sub_path":"Hackerrank/contest/ProjectEuler/ProjectEuler2-EvenFibonaccinumbers.py","file_name":"ProjectEuler2-EvenFibonaccinumbers.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
+{"seq_id":"37921265250","text":"from django import forms\n\nfrom .models import Site\n\nclass BaseForm(forms.Form):\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('label_suffix', '') # globally override the Django >=1.6 default of ':'\n super(BaseForm, self).__init__(*args, **kwargs)\n\nclass BaseModelForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('label_suffix', '') # globally override the Django >=1.6 default of ':'\n super(BaseModelForm, self).__init__(*args, **kwargs)\n\nclass SiteForm(BaseModelForm):\n name = forms.CharField(\n label='',\n max_length=50,\n widget=forms.TextInput(attrs={\n 'class': 'form-control',\n 'placeholder': 'Site Name',\n }),\n )\n url = forms.CharField(\n label='',\n max_length=200,\n widget=forms.TextInput(attrs={\n 'class': 'form-control',\n 'placeholder': 'URL',\n }),\n )\n github = forms.CharField(\n label='',\n max_length=200,\n widget=forms.TextInput(attrs={\n 'class': 'form-control',\n 'value': 'https://github.com/McCarthyCode/',\n 'placeholder': 'GitHub URL',\n }),\n )\n active = forms.BooleanField(\n initial=True,\n required=False,\n label='Website Is Active',\n widget=forms.CheckboxInput()\n )\n description = forms.CharField(\n label='',\n max_length=200,\n widget=forms.Textarea(attrs={\n 'class': 'form-control',\n 'placeholder': 'Description',\n 'rows': 5,\n }),\n )\n screenshots = forms.ImageField(\n label='Screenshots',\n required=False,\n widget=forms.ClearableFileInput(attrs={'multiple': True}),\n )\n\n def clean(self):\n super().clean()\n return self.cleaned_data\n\n class Meta:\n model = Site\n fields = ['name', 'url', 'github', 'active', 'description']\n","repo_name":"McCarthyCode/McCarthy-Code","sub_path":"home/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"42470259696","text":"from collective.elasticsearch.testing import ElasticSearch_API_TESTING\nfrom collective.elasticsearch.testing import ElasticSearch_REDIS_TESTING\nfrom collective.elasticsearch.tests import BaseAPITest\nfrom parameterized import parameterized_class\nfrom plone.app.testing import SITE_OWNER_NAME\nfrom plone.app.testing import SITE_OWNER_PASSWORD\nfrom plone.restapi.testing import RelativeSession\n\n\n@parameterized_class(\n [{\"layer\": ElasticSearch_API_TESTING}, {\"layer\": ElasticSearch_REDIS_TESTING}]\n)\nclass TestService(BaseAPITest):\n def setUp(self):\n super().setUp()\n self.portal = self.layer[\"portal\"]\n self.portal_url = self.portal.absolute_url()\n self.request = self.portal.REQUEST\n self.api_session = RelativeSession(self.portal_url)\n self.api_session.headers.update({\"Accept\": \"application/json\"})\n self.api_session.auth = (SITE_OWNER_NAME, SITE_OWNER_PASSWORD)\n\n def tearDown(self):\n self.api_session.close()\n\n def test_get(self):\n response = self.api_session.get(\"/@elasticsearch\")\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.headers.get(\"Content-Type\"), \"application/json\")\n\n results = response.json()\n self.assertEqual(results[\"@id\"], f\"{self.portal.absolute_url()}/@elasticsearch\")\n self.assertIn(\"Cluster Name\", results.keys())\n self.assertIn(\"Elastic Search Version\", results.keys())\n self.assertIn(\"Number of docs (Catalog)\", results.keys())\n self.assertIn(\"Index Name\", results.keys())\n self.assertIn(\"Number of docs\", results.keys())\n self.assertIn(\"Deleted docs\", results.keys())\n self.assertIn(\"Size\", results.keys())\n self.assertIn(\"Query Count\", results.keys())\n\n def test_post_convert(self):\n response = self.api_session.post(\"/@elasticsearch\", json={\"action\": \"convert\"})\n\n self.assertEqual(response.status_code, 204)\n\n def test_post_rebuild(self):\n response = self.api_session.post(\"/@elasticsearch\", json={\"action\": \"rebuild\"})\n\n self.assertEqual(response.status_code, 204)\n\n def test_post_invalid(self):\n response = self.api_session.post(\n \"/@elasticsearch\", json={\"action\": \"bad_action\"}\n )\n\n self.assertEqual(response.status_code, 400)\n\n def test_control_panel_registered(self):\n response = self.api_session.get(\"/@controlpanels\")\n data = response.json()\n titles = [panel[\"title\"] for panel in data]\n self.assertIn(\"Elastic search\", titles)\n\n def test_control_panel_schema(self):\n response = self.api_session.get(\"/@controlpanels/elasticsearch\")\n data = response.json()\n self.assertEqual(data[\"title\"], \"Elastic search\")\n self.assertEqual(data[\"group\"], \"Add-on Configuration\")\n self.assertTrue(data[\"data\"][\"enabled\"])\n self.assertTrue(\"enabled\", data[\"schema\"][\"fieldsets\"][0][\"fields\"])\n","repo_name":"collective/collective.elasticsearch","sub_path":"src/collective/elasticsearch/tests/test_services.py","file_name":"test_services.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"67"}
+{"seq_id":"7028277600","text":"from django.db import models\nfrom django.conf import settings\nimport os.path\nimport csv\n\nclass CountryManager(models.Manager):\n\n def import_countries(self):\n with open('data/countries.csv', encoding=\"utf-8\", errors='ignore') as csvfile:\n reader = csv.DictReader(csvfile)\n iterator = 0\n for row in reader:\n self.get_or_create(name=row['Country Name'], code=row['Country Code'], curr_rate=row['2017'])\n iterator += 1\n print(row)\n return iterator\n\nclass Country(models.Model):\n name = models.CharField(max_length=255)\n code = models.CharField(max_length=5, blank=True)\n curr_rate = models.FloatField(null=True, blank=True)\n gdp = models.FloatField(null=True, blank=True)\n num_smokers = models.FloatField(null=True, blank=True)\n rate = models.FloatField(null=True, blank=True)\n year = models.IntegerField(null=True, blank=True)\n co2 = models.FloatField(null=True, blank=True)\n\n\n def __str__(self):\n return self.name\n\n class Meta(object):\n verbose_name_plural = 'countries'\n\n # def save(self, *args, **kwargs):\n # super(Country, self).save(*args, **kwargs)\n\n # def get_project_url(self, *args, **kwargs):\n # return reverse('project-create') + '?country={}'.format(self.id)\n\n","repo_name":"leonbi100/heart_beat","sub_path":"beat_site/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"43550012309","text":"from argparse import ArgumentParser\nimport yaml\nfrom alchemist.laboratory import Laboratory\n\n\ndef process():\n \"\"\"\n \"\"\"\n parser = ArgumentParser(description=\"\")\n\n # optional argument, when passed the value True is stored.\n parser.add_argument('--reactions', '-r', action=\"store_true\",\n help=\"returns the number of reactions.\")\n\n # mandatory argument, which holds the laboratory in a yaml file\n parser.add_argument('yaml_file', help=\"laboratory stored as a yaml\"\n + \"file\")\n\n arguments = parser.parse_args()\n\n dict_lab = yaml.load(open(arguments.yaml_file))\n\n # instantiates a new laboratory object using the dictionary from\n # the yaml_file arguemnet\n lab = Laboratory(dict_lab)\n\n if(arguments.reactions):\n print(lab.run_full_experiment(arguments.reactions))\n else:\n lab.run_full_experiment(arguments.reactions)\n print(yaml.dump({'lower': lab.lower, 'upper': lab.upper}))\n\nif __name__ == '__main__':\n process()\n","repo_name":"prawnrao/rsd-exam-prep","sub_path":"cw1/15008121/alchemist/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"74208677653","text":"from types import SimpleNamespace\nimport requests\nimport json\n\ndef sendRequestToApi(session, post_args, method):\n \"\"\"Отправляет POST запрос с указанными параметрами\"\"\"\n post_data = json.loads(\n session.post(\"https://api.keksik.io/\"+method,\n headers={'Content-Type': 'application/json'},\n json=post_args).text,\n object_hook=lambda d: SimpleNamespace(**d)\n )\n \n if post_data.success:\n return post_data.list\n else:\n raise Exception(post_data.error, post_data.msg)\n\nclass KeksikApi:\n def __init__(self,\n group_id=None,\n token=None,\n v=1):\n # Обязательные обьекты\n self.group_id = abs(group_id)\n self.token = str(token)\n self.v = v\n # HTTP сессия\n self.Session = requests.Session()\n \n # Классы\n self.donates = self.donates(self.group_id, self.token, self.v, self.Session)\n self.campaigns = self.campaigns(self.group_id, self.token, self.v, self.Session)\n self.payments = self.payments(self.group_id, self.token, self.v, self.Session)\n \n class donates:\n\n def __init__(self,\n group_id=None,\n token=None,\n v=1,\n session=None):\n self.group_id = abs(group_id)\n self.token = str(token)\n self.v = v\n self.Session = session\n \n\n def get(self,\n length=20,\n offset=None,\n start_date=None,\n end_date=None,\n sort=None,\n reverse=None\n ):\n post_args = {\n 'group':self.group_id,\n 'token':self.token,\n 'v':self.v,\n 'len':length,\n 'offset':offset,\n 'start_date':start_date,\n 'end_date':end_date,\n 'sort':sort,\n 'reverse':reverse\n }\n return sendRequestToApi(self.Session, post_args, 'donates/get')\n \n def getLast(self, last=None):\n post_args = {\n 'group':self.group_id,\n 'token':self.token,\n 'v':self.v,\n 'last':last\n }\n\n return sendRequestToApi(self.Session, post_args, 'donates/get-last')\n \n def changeStatus(self, id, status):\n post_args = {\n 'group':self.group_id,\n 'token':self.token,\n 'v':self.v,\n 'id':id,\n 'status':status\n }\n\n return sendRequestToApi(self.Session, post_args, 'donates/change-status')\n \n def answer(self, id, answer):\n post_args = {\n 'group':self.group_id,\n 'token':self.token,\n 'v':self.v,\n 'id':id,\n 'answer':answer\n }\n\n return sendRequestToApi(self.Session, post_args, 'donates/change-status')\n \n def changeRewardStatus(self, id, status):\n post_args = {\n 'group':self.group_id,\n 'token':self.token,\n 'v':self.v,\n 'id':id,\n 'status':status\n }\n\n return sendRequestToApi(self.Session, post_args, 'donates/change-status')\n \n class campaigns:\n\n def __init__(self,\n group_id=None,\n token=None,\n v=1,\n session=None):\n self.group_id = abs(group_id)\n self.token = str(token)\n self.v = v\n self.Session = session\n \n \n def get(self, ids=None):\n post_args = {\n 'group':self.group_id,\n 'token':self.token,\n 'v':self.v,\n 'ids':ids\n }\n\n return sendRequestToApi(self.Session, post_args, 'campaigns/get')\n \n def getActive(self):\n post_args = {\n 'group':self.group_id,\n 'token':self.token,\n 'v':self.v\n }\n\n return sendRequestToApi(self.Session, post_args, 'campaigns/get-active')\n \n def getRewards(self, campaign):\n post_args = {\n 'group':self.group_id,\n 'token':self.token,\n 'v':self.v,\n 'campaign':campaign\n }\n\n return sendRequestToApi(self.Session, post_args, 'campaigns/get-rewards')\n \n def change(self,\n id,\n title=None,\n status=None,\n end=None,\n point=None,\n start_received=None,\n start_backers=None\n ):\n post_args = {\n 'group':self.group_id,\n 'token':self.token,\n 'v':self.v,\n 'id':id,\n 'title':title,\n 'status':status,\n 'end':end,\n 'point':point,\n 
'start_received':start_received,\n 'start_backers':start_backers\n }\n\n return sendRequestToApi(self.Session, post_args, 'campaigns/change')\n \n def changeReward(self,\n id,\n title=None,\n desc=None,\n min_donate=None,\n limits=None,\n status=None\n ):\n post_args = {\n 'group':self.group_id,\n 'token':self.token,\n 'v':self.v,\n 'id':id,\n 'title':title,\n 'desc':desc,\n 'min_donate':min_donate,\n 'limits':limits,\n 'status':status\n }\n\n return sendRequestToApi(self.Session, post_args, 'campaigns/change-reward')\n \n class payments:\n\n def __init__(self,\n group_id=None,\n token=None,\n v=1,\n session=None):\n self.group_id = abs(group_id)\n self.token = str(token)\n self.v = v\n self.Session = session\n \n \n def get(self, ids=None):\n post_args = {\n 'group':self.group_id,\n 'token':self.token,\n 'v':self.v,\n 'ids':ids\n }\n\n return sendRequestToApi(self.Session, post_args, 'payments/get')\n \n def create(self, \n system,\n purse,\n amount,\n name=None\n ):\n post_args = {\n 'group':self.group_id,\n 'token':self.token,\n 'v':self.v,\n 'system':system,\n 'purse':purse,\n 'name':name,\n 'amount':amount\n }\n\n return sendRequestToApi(self.Session, post_args, 'payments/create')\n \n def balance(self, ):\n post_args = {\n 'group':self.group_id,\n 'token':self.token,\n 'v':self.v,\n }\n\n return sendRequestToApi(self.Session, post_args, 'balance')","repo_name":"Friendosie/pyKeksik","sub_path":"pyKeksik/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":7286,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
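sendRequestToApi above is requests.Session.post plus json.loads with a SimpleNamespace object_hook, so the response fields become attributes; the parsing half in isolation (the sample payload is made up):

```python
import json
from types import SimpleNamespace

# object_hook turns every JSON object into a SimpleNamespace, giving dot access.
payload = '{"success": true, "list": [1, 2, 3]}'
data = json.loads(payload, object_hook=lambda d: SimpleNamespace(**d))
print(data.success, data.list)   # True [1, 2, 3]
```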
+{"seq_id":"6492155357","text":"import visa\r\nimport numpy as np\r\nfrom math import log\r\nimport csv\r\nimport sys\r\n\r\nfrom enum import Enum\r\nclass State(Enum):\r\n OFF = 0\r\n ON = 1\r\n\r\nclass e4980al:\r\n def __init__(self, auto_choose = False):\r\n self.e4980al_usb_id = \"0x0957::0x0909\"\r\n instrument_visa_identifier = self.select_instrument(auto_choose)\r\n if instrument_visa_identifier is None:\r\n raise Exception\r\n self.init_instrument(instrument_visa_identifier)\r\n\r\n self.bias = 0.0\r\n\r\n def select_instrument(self, auto_choose):\r\n found_count = 0\r\n rm = visa.ResourceManager()\r\n reslist = rm.list_resources()\r\n e4980al_dict = {}\r\n for r in reslist:\r\n if self.e4980al_usb_id in r:\r\n e4980al_dict[found_count] = r\r\n found_count += 1\r\n if found_count == 0:\r\n visa_address = None\r\n elif auto_choose or found_count == 1:\r\n visa_address = e4980al_dict[0]\r\n print(\"Auto selected instument: {}\".format(visa_address))\r\n else:\r\n for idx, r in e4980al_dict.items():\r\n print(\"({idx}), {res_name}\".format(idx=idx+1, res_name=r))\r\n i = int(input(\"Which instument?: \")) - 1\r\n visa_address = e4980al_dict[i]\r\n print(\"Selected instument: {}\".format(visa_address))\r\n\r\n return visa_address\r\n\r\n\r\n\r\n def init_instrument(self, instrument_visa_identifier):\r\n rm = visa.ResourceManager()\r\n my_instrument = rm.open_resource(instrument_visa_identifier)\r\n self.inst = my_instrument\r\n\r\n def meas_type(self, meas_type):\r\n #\r\n # Syntax:\r\n # :FUNCtion:IMPedance[:TYPE]\r\n # {CPD|CPQ|CPG|CPRP|CSD|CSQ|CSRS|LPD|LPQ|LPG|LPRP|LPRD|LSD|LSQ|LSRS|LSRD|RX|ZTD|ZTR|GB|YTD|YTR|VDID}\r\n\r\n if self.meas_type_valid(meas_type):\r\n print(\"Measurement type: \" + meas_type.upper())\r\n self.write(\":FUNC:IMP {}\".format(meas_type.upper()))\r\n else:\r\n print(\"INVALID! measurement type: \" + meas_type.upper(), file=sys.stderr)\r\n\r\n def meas_type_valid(self, meas_type):\r\n available_meas_types = [\"CPD\", \"CPQ\", \"CPG\", \"CPRP\", \"CSD\", \"CSQ\", \"CSRS\", \"LPD\", \"LPQ\", \"LPG\", \"LPRP\", \"LPRD\", \"LSD\", \"LSQ\", \"LSRS\",\r\n \"LSRD\", \"RX\", \"ZTD\", \"ZTR\", \"GB\", \"YTD\", \"YTR\", \"VDID\"]\r\n if meas_type.upper() in available_meas_types:\r\n return True\r\n else:\r\n return False\r\n\r\n def set_meas_freq(self, hz):\r\n if 20 <= hz and hz <= 1e6:\r\n self.inst.write(\":FREQ:CW {}\".format(hz))\r\n else:\r\n print(\"INVALID! Out of range\", file=sys.stderr)\r\n\r\n def set_meas_voltage(self, voltage):\r\n if self.bias + 0.0 <= voltage and voltage <= self.bias + 2.0:\r\n self.inst.write(\":VOLT:LEV {}\".format(voltage))\r\n else:\r\n print(\"INVALID! 
Out of range\", file=sys.stderr)\r\n\r\n def set_bias_voltage(self, voltage):\r\n self.bias = voltage\r\n self.inst.write(\":BIAS:VOLT:LEV {}\".format(voltage))\r\n\r\n def set_trig_mode(self, trig_mode):\r\n # Syntax\r\n # :TRIGger:SOURce {INTernal|HOLD|EXTernal|BUS}\r\n # :TRIGger:SOURce?\r\n self.inst.write(\":TRIG:SOUR {}\".format(trig_mode))\r\n\r\n def trigger(self):\r\n # Syntax\r\n # TRIGger[:IMMediate]\r\n self.inst.write(\":TRIG:IMM\")\r\n\r\n def aperture(self, time, average):\r\n # Syntax\r\n # :APERture {SHORt | MEDium | LONG}, < numeric >\r\n # :APERture?\r\n self.inst.write(\":APER {},{}\".format(time,average))\r\n\r\n def clear_display(self):\r\n self.inst.write(\":DISP:CCL\")\r\n\r\n def enable_display(self, state):\r\n # Syntax:\r\n # DISPlay:ENABle {ON | OFF | 1 | 0}\r\n # :DISPlay: ENABle?\r\n self.inst.write(\":DISP:ENAB {}\".format(state))\r\n\r\n def set_comment(self, comment):\r\n # Syntax:\r\n # :DISPlay:LINE \r\n # :DISPlay:LINE?\r\n if len(comment) <= 30:\r\n self.inst.write(\":DISP:LINE \\\"{}\\\"\".format(comment))\r\n else:\r\n print(\"INVALID! Max. 30 ASCII chars allowed\", file=sys.stderr)\r\n\r\n def disp_page(self, page):\r\n '''\r\n Syntax: DISPlay:PAGE\r\n :MEASurement | BNUMber | BCOunt | LIST | MSETup | CSETup | LTABle | LSETup | CATAlog | SYSTem | SELF | MLARge | SCONfig | SERVice}\r\n :DISPlay: PAGE?\r\n\r\n MEASurement (Preset value) Sets displayed page to \r\n BNUMber Sets displayed page to \r\n BCOunt Sets displayed page to \r\n LIST Sets displayed page to \r\n MSETup Sets displayed page to \r\n CSETup Sets displayed page to \r\n LTABle Sets displayed page to \r\n LSETup Sets displayed page to \r\n CATAlog Sets displayed page to \r\n SYSTem Sets displayed page to \r\n SELF Sets display page to \r\n MLARge Sets page to display measurement results in large\r\n characters\r\n SCONfig Sets displayed page to \r\n SERVice Sets displayed page to \r\n\r\n :param page:\r\n :return:\r\n '''\r\n self.inst.write(\":DISP:PAGE {}\".format(page))\r\n\r\n\r\n def get_log_list(self, from_val, to_val, number_of_steps):\r\n base = 10.0\r\n logsp = np.logspace(log(from_val, base), log(to_val, base), num=number_of_steps, endpoint=True, base=base,\r\n dtype=None)\r\n return logsp\r\n\r\n def get_lin_list(self, from_val, to_val, number_of_steps):\r\n linsp = np.linpace(from_val, to_val, num=number_of_steps, endpoint=True, dtype=None)\r\n return linsp\r\n\r\n\r\n def fetch(self):\r\n str = self.inst.query(\":FETC?\")\r\n l = str.split(\",\")\r\n l = list(map(float, l))\r\n return l\r\n\r\n def beep_type(self, beep_t):\r\n self.inst.query(\":SYSTem:BEEPer:TONE {}\".format(beep_t))\r\n\r\n def beep_enable(self, state):\r\n # Syntax:\r\n # :SYSTem:BEEPer: STATe {ON | OFF | 1 | 0}\r\n # :SYSTem: BEEPer:STATe?\r\n self.inst.query(\":SYSTem:BEEPer:STATe {}\".format(state))\r\n\r\n def beep(self):\r\n self.inst.write(\":SYSTem:BEEPer:IMMediate\")\r\n\r\n def meas_point(self, hz, voltage):\r\n self.set_meas_freq(hz)\r\n str = self.inst.query(\":FETC?\")\r\n l = str.split(\",\")\r\n l = list(map(float, l))\r\n m1 = l[0]\r\n m2 = l[1]\r\n return hz, m1, m2\r\n\r\n\r\n\r\n def manual_list_measure(self, meas_type, from_hz, to_hz, steps):\r\n l = []\r\n self.meas_type(meas_type)\r\n for i in self.get_log_list(from_hz,to_hz,steps):\r\n m = self.meas_point(i)\r\n l.append(m)\r\n\r\n return l\r\n\r\ndef save_as_csv(list_of_results, filename, header=None):\r\n with open(filename, \"w\") as f:\r\n csv_out = csv.writer(f)\r\n if header is not None:\r\n csv_out.writerow(header)\r\n for row 
in list_of_results:\r\n csv_out.writerow(row)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n muszer = e4980al()\r\n x = muszer.manual_list_measure(\"CSRS\", 20, 1e6, 200)\r\n save_as_csv(x, \"teszt.csv\")\r\n\r\n\r\n","repo_name":"mitle/keysight_e4980al","sub_path":"keysight_e4980al.py","file_name":"keysight_e4980al.py","file_ext":"py","file_size_in_byte":7214,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
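The sweep in manual_list_measure reduces to numpy.logspace over the 20 Hz to 1 MHz band plus csv.writer; a standalone sketch with placeholder measurements, so no instrument is required:

```python
import csv
import numpy as np

# Same logarithmic frequency grid the driver builds, then a plain CSV dump.
freqs = np.logspace(np.log10(20), np.log10(1e6), num=10)
rows = [(f, 0.0, 0.0) for f in freqs]            # (hz, m1, m2) placeholders

with open('sweep.csv', 'w', newline='') as f:
    w = csv.writer(f)
    w.writerow(('hz', 'm1', 'm2'))
    w.writerows(rows)
```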
+{"seq_id":"34243755563","text":"def rev(x: int):\n rev = 0\n while x > 0:\n rev = rev * 10 + x % 10\n x //= 10\n return rev\n\ni, j, k = [int(x) for x in input().split()]\nc = 0\n\nfor day in range(i, j + 1):\n temp = day - rev(day)\n if temp % k == 0: c += 1\n\nprint(c)\n","repo_name":"AmolOnGitHub/PythonPrograms","sub_path":"HackerRank/ProblemSolving/beautifulDays.py","file_name":"beautifulDays.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"28558034306","text":"trash_words = set(['i', 'pa', 'te', 'ni', 'niti', 'a', 'ali', 'nego', 'no', 'ili'])\n\ns = input().split()\nanswer = s[0][0]\n\nfor word in s[1:]:\n if word in trash_words: continue\n answer += word[0]\n\nprint(answer.upper())\n","repo_name":"NeoMindStd/CodingLife","sub_path":"baekjoon/3181py/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"70691615574","text":"import cv2\nimport math\nimport numpy as np\nimport sys\n\nimport alphashape\nfrom rtree import index\nfrom shapely.geometry import Polygon\nfrom shapely.geometry import LineString\nfrom shapely.ops import split\n\nfrom table_recognition.graph.colorers.utils import get_multiple_values_from_dict\n\n\nclass GeometryGraphColorer(object):\n def __init__(self, graph):\n self.graph = graph\n self.img_height, self.img_width, _ = cv2.imread(self.graph.img_path).shape\n\n self.rtree_index = index.Index()\n self.rtree_index_2_node = {}\n\n def color_graph(self):\n self.build_rtree()\n self.color_nodes()\n self.color_edges()\n\n def color_nodes(self):\n for node in self.graph.nodes:\n # Bounding box center\n x, y = node.bbox[\"center\"]\n position = [x / self.img_width, y / self.img_height]\n\n # Bounding box width and height\n [(min_x, min_y), (max_x, max_y)] = node.bbox[\"corners\"]\n bbox_width = abs(max_x - min_x) / self.img_width\n bbox_height = abs(max_y - min_y) / self.img_height\n bbox_dimensions = [bbox_width, bbox_height]\n\n node.input_feature_vector = position + bbox_dimensions\n\n def color_edges(self):\n # TODO - Cleanup edge coloring for GeometryGraphColorer\n for edge in self.graph.edges:\n # Center of node1\n node1_x, node1_y = edge.node1.bbox[\"center\"]\n node1_x, node1_y = node1_x / self.img_width, node1_y / self.img_height\n\n # Center of node2\n node2_x, node2_y = edge.node2.bbox[\"center\"]\n node2_x, node2_y = node2_x / self.img_width, node2_y / self.img_height\n\n # -- Feature1: Distance of the two centers -----------------------------------------------------------------\n distance = np.linalg.norm(np.array([node1_x, node1_y]) - np.array([node2_x, node2_y]))\n # ----------------------------------------------------------------------------------------------------------\n\n # -- Feature2: Average of the two centers ------------------------------------------------------------------\n avg_position_x, avg_position_y = (node1_x + node2_x) / 2, (node1_y + node2_y) / 2\n # ----------------------------------------------------------------------------------------------------------\n\n # -- Feature4: Orientation of the edge ---------------------------------------------------------------------\n right_node = edge.node1 if edge.node1.bbox[\"center\"][0] > edge.node2.bbox[\"center\"][0] else edge.node2\n left_node = edge.node2 if edge.node1.bbox[\"center\"][0] > edge.node2.bbox[\"center\"][0] else edge.node1\n right_node_x, right_node_y = right_node.bbox[\"center\"]\n left_node_x, left_node_y = left_node.bbox[\"center\"]\n\n x_distance = right_node_x - left_node_x\n y_distance = abs(right_node_y - left_node_y)\n orientation = y_distance / (x_distance + sys.float_info.epsilon)\n orientation = math.degrees(math.atan(orientation))\n\n if (right_node_y - left_node_y) > 0:\n orientation = 90 + (90 - orientation)\n\n if orientation < 5 or orientation > 175:\n orientation = 180\n # ----------------------------------------------------------------------------------------------------------\n\n # -- Feature5: Vertical and horizontal overlap ------------------------------------------------------------\n [(node1_min_x, node1_min_y), (node1_max_x, node1_max_y)] = edge.node1.bbox[\"corners\"]\n [(node2_min_x, node2_min_y), (node2_max_x, node2_max_y)] = edge.node2.bbox[\"corners\"]\n\n # a) vertical overlap\n x_max = min(node1_max_x, node2_max_x)\n x_min = max(node1_min_x, node2_min_x)\n x_overlap = (x_max - x_min) if (x_max - x_min) > 0 else 0\n\n # b) horizontal overlap\n 
y_max = min(node1_max_y, node2_max_y)\n y_min = max(node1_min_y, node2_min_y)\n y_overlap = (y_max - y_min) if (y_max - y_min) > 0 else 0\n # ---------------------------------------------------------------------------------------------------------\n\n # -- Feature5: Calculate overlap in percentage ------------------------------------------------------------\n if x_overlap > 0:\n ys = [node1_min_y, node2_min_y, node1_max_y, node2_max_y]\n ys.sort()\n bbox_in_between = (x_min, ys[1], x_max, ys[2])\n nodes_intersections_idx = list(self.rtree_index.intersection(bbox_in_between))\n\n if edge.node1.id in nodes_intersections_idx:\n nodes_intersections_idx.remove(edge.node1.id)\n if edge.node2.id in nodes_intersections_idx:\n nodes_intersections_idx.remove(edge.node2.id)\n nodes = get_multiple_values_from_dict(self.rtree_index_2_node, nodes_intersections_idx)\n\n bbox_in_between_set = set(range(x_min, x_max+1))\n for node in nodes:\n iter_node_x_max = node.bbox[\"rtree\"][2]\n iter_node_x_min = node.bbox[\"rtree\"][0]\n iter_node_set = set(range(iter_node_x_min, iter_node_x_max+1))\n bbox_in_between_set = bbox_in_between_set - iter_node_set\n\n bbox_in_between_list = list(bbox_in_between_set)\n bbox_in_between_list.sort()\n\n if len(bbox_in_between_list) <= 0:\n x_overlap = 0\n else:\n x_overlap = bbox_in_between_list[-1] - bbox_in_between_list[0]\n x_min_side = min(abs(node1_max_x - node1_min_x), abs(node2_max_x - node2_min_x))\n x_overlap = x_overlap / x_min_side\n\n if (x_max - x_min) < 0:\n x_overlap = 0\n\n if y_overlap > 0:\n xs = [node1_min_x, node2_min_x, node1_max_x, node2_max_x]\n xs.sort()\n bbox_in_between = (xs[1], y_min, xs[2], y_max)\n\n nodes_intersections_idx = list(self.rtree_index.intersection(bbox_in_between))\n if edge.node1.id in nodes_intersections_idx:\n nodes_intersections_idx.remove(edge.node1.id)\n if edge.node2.id in nodes_intersections_idx:\n nodes_intersections_idx.remove(edge.node2.id)\n nodes = get_multiple_values_from_dict(self.rtree_index_2_node, nodes_intersections_idx)\n\n bbox_in_between_set = set(range(y_min, y_max + 1))\n\n for node in nodes:\n iter_node_y_max = node.bbox[\"rtree\"][3]\n iter_node_y_min = node.bbox[\"rtree\"][1]\n iter_node_set = set(range(iter_node_y_min, iter_node_y_max + 1))\n bbox_in_between_set = bbox_in_between_set - iter_node_set\n\n bbox_in_between_list = list(bbox_in_between_set)\n bbox_in_between_list.sort()\n\n if len(bbox_in_between_list) <= 0:\n y_overlap = 0\n else:\n y_overlap = bbox_in_between_list[-1] - bbox_in_between_list[0]\n\n y_min_side = min(abs(node1_max_y - node1_min_y), abs(node2_max_y - node2_min_y))\n y_overlap = y_overlap / y_min_side\n # ----------------------------------------------------------------------------------------------------------\n\n # polygons_see_each_other = int(self.polygons_see_each_other(edge.node1, edge.node2))\n\n edge.input_feature_vector = [float(distance)] + \\\n [avg_position_x, avg_position_y] + \\\n [orientation] + \\\n [x_overlap, y_overlap]\n # [polygons_see_each_other]\n\n def build_rtree(self):\n for node in self.graph.nodes:\n self.rtree_index.insert(node.id, node.bbox[\"rtree\"])\n self.rtree_index_2_node[node.id] = node\n\n def polygons_see_each_other(self, node1, node2):\n \"\"\"\n Function that checks whether two polygons see each other (meaning there is not\n a point in polygon1 nor polygon2 from which it is possible to \"see\" any point\n from the other polygon)\n\n :param node1: Points that define the first polygon\n :param node2: Points that define the second polygon\n 
:return: True if polygons see each other\n                 False if polygons do not see each other\n        \"\"\"\n        polygon1 = node1.bbox[\"polygon\"]\n        polygon2 = node2.bbox[\"polygon\"]\n\n        # Create polygon that represents the convex hull of polygon1 and polygon2\n        hull_points = polygon1 + polygon2\n\n        # Source:\n        # https://stackoverflow.com/questions/10846431/ordering-shuffled-points-that-can-be-joined-to-form-a-polygon-in-python\n        alpha_shape = alphashape.alphashape(hull_points, 0.5)\n        hull_polygon = Polygon(alpha_shape)\n\n        max_x = max(hull_points, key=lambda item: item[0])[0]\n        max_y = max(hull_points, key=lambda item: item[1])[1]\n        min_x = min(hull_points, key=lambda item: item[0])[0]\n        min_y = min(hull_points, key=lambda item: item[1])[1]\n        hull_points_bbox = (min_x, min_y, max_x, max_y)\n\n        # Find nodes that intersect created hull\n        nodes_intersections_idx = list(self.rtree_index.intersection(hull_points_bbox))\n        if node1.id in nodes_intersections_idx:\n            nodes_intersections_idx.remove(node1.id)\n        if node2.id in nodes_intersections_idx:\n            nodes_intersections_idx.remove(node2.id)\n\n        nodes = get_multiple_values_from_dict(self.rtree_index_2_node, nodes_intersections_idx)\n\n        for node in nodes:\n            # Split the hull polygon using the node polygon\n            node_linestring = LineString(node.bbox[\"polygon\"] + [node.bbox[\"polygon\"][0]])\n            hull_polygon_splitted = split(hull_polygon, node_linestring).geoms\n\n            # Check how many new polygons were created by splitting the hull_polygon\n            if len(hull_polygon_splitted) > 2:\n                # If there are more than 2 new polygons => the nodes do not see each other\n                return False\n            elif len(hull_polygon_splitted) <= 1:\n                continue\n            else:\n                # Else find the cut hull polygon in the output of split() and continue the process\n                node_polygon = Polygon(node.bbox[\"polygon\"])\n                fst_intersection = node_polygon.intersection(hull_polygon_splitted[0]).area / node_polygon.area\n                hull_polygon = hull_polygon_splitted[0] if fst_intersection > 0.9 else hull_polygon_splitted[1]\n\n        return True\n","repo_name":"lpiwowar/table-recognition","sub_path":"table_recognition/graph/colorers/geometry_graph_colorer.py","file_name":"geometry_graph_colorer.py","file_ext":"py","file_size_in_byte":10952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
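Both the overlap features and the visibility test hinge on rtree range queries; the index round trip in isolation (requires the rtree package the module already imports):

```python
from rtree import index

# Insert two boxes, then ask which of them intersect a query window.
idx = index.Index()
idx.insert(0, (0, 0, 10, 10))      # id, (min_x, min_y, max_x, max_y)
idx.insert(1, (20, 20, 30, 30))

print(list(idx.intersection((5, 5, 25, 25))))  # [0, 1]: both boxes overlap the window
```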
+{"seq_id":"36931302279","text":"import cv2\nimport numpy as np\nimport os \n#socket\nimport socket\nimport threading\n\n#GPIO------------------------------------------------------------------------------------------------------------------\nimport RPi.GPIO as GPIO\nimport time\nOFFSE_DUTY = 0.5 #define pulse offset of servo\nSERVO_MIN_DUTY = 2.5+OFFSE_DUTY #define pulse duty cycle for minimum angle of servo\nSERVO_MAX_DUTY = 12.5+OFFSE_DUTY #define pulse duty cycle for maximum angle of servo\nservoPin = 12\nbuttonPin = 11 # define buttonPin\n\ncell_phone_count = 0\n\ndef map( value, fromLow, fromHigh, toLow, toHigh): # map a value from one range to another range\n return (toHigh-toLow)*(value-fromLow) / (fromHigh-fromLow) + toLow\n\ndef setup():\n global p\n GPIO.setmode(GPIO.BOARD) # use PHYSICAL GPIO Numbering\n GPIO.setup(servoPin, GPIO.OUT) # Set servoPin to OUTPUT mode\n GPIO.output(servoPin, GPIO.LOW) # Make servoPin output LOW level\n\n p = GPIO.PWM(servoPin, 50) # set Frequece to 50Hz\n p.start(0) # Set initial Duty Cycle to 0\n GPIO.setup(buttonPin, GPIO.IN, pull_up_down=GPIO.PUD_UP) # set buttonPin to PULL UP INPUT mode \n\ndef servoWrite(angle): # make the servo rotate to specific angle, 0-180 \n if(angle<0):\n angle = 0\n elif(angle > 35):\n angle = 35\n p.ChangeDutyCycle(map(angle,0,180,SERVO_MIN_DUTY,SERVO_MAX_DUTY)) # map the angle to duty cycle and output it\n\ndef destroy():\n p.stop()\n GPIO.cleanup()\n\ndef loop():\n for dc in range(0, 36, 1): # make servo rotate from 0 to 180 deg\n servoWrite(dc) # Write dc value to servo\n time.sleep(0.001)\n time.sleep(5.0)\n for dc in range(35, -1, -1): # make servo rotate from 180 to 0 deg\n servoWrite(dc)\n time.sleep(0.001)\n time.sleep(1.0)\n\ndef call_cellphone():\n global cell_phone_count\n while True:\n coon, addr = server.accept()\n clientMessage = str(coon.recv(1024), encoding='utf-8')\n\n print('Client message is:', clientMessage)\n\n serverMessage = 'I\\'m here'\n coon.sendall(serverMessage.encode())\n coon.close()\n cell_phone_count = 1\n time.sleep(5.0)\n cell_phone_count = 0\n\n#GPIO------------------------------------------------------------------------------------------------------------------\n\nrecognizer = cv2.face.LBPHFaceRecognizer_create()\nrecognizer.read('trainer/trainer.yml')\ncascadePath = \"Cascades/haarcascade_frontalface_default.xml\"\nfaceCascade = cv2.CascadeClassifier(cascadePath);\nfont = cv2.FONT_HERSHEY_SIMPLEX\n#iniciate id counter\nid = 0\n# names related to ids: example ==> Marcelo: id=1, etc\nnames = ['None', 'Marcelo', 'Kevin', 'Ilza', 'Z', 'W'] \n# Initialize and start realtime video capture\ncam = cv2.VideoCapture(0)\ncam.set(3, 640) # set video widht\ncam.set(4, 480) # set video height\n# Define min window size to be recognized as a face\nminW = 0.1*cam.get(3)\nminH = 0.1*cam.get(4)\n\nbuttonflag = 1\nface_control = 0\nsetup()\n\n#service setting\nHOST = '192.168.0.26'\nPOST = 8000\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind((HOST, POST))\nserver.listen(10)\n\nthread = threading.Thread(target=call_cellphone)\nthread.start()\n\nwhile True:\n if(buttonflag == 0):\n setup()\n buttonflag = 1\n if (GPIO.input(buttonPin)==GPIO.LOW or cell_phone_count == 1): # if button is pressed\n loop()\n destroy()\n buttonflag = 0\n\n ret, img =cam.read()\n #img = cv2.flip(img, -1) # Flip vertically\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n faces = faceCascade.detectMultiScale( \n gray,\n scaleFactor = 1.2,\n minNeighbors = 5,\n minSize = (int(minW), int(minH)),\n )\n 
for(x,y,w,h) in faces:\n        cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)\n        id, confidence = recognizer.predict(gray[y:y+h,x:x+w])\n        # Check if confidence is less than 100 ==> \"0\" is perfect match \n        if (confidence < 100):\n            id = names[id]\n            confidence = \" {0}%\".format(round(100 - confidence))\n            print(id)\n            print(confidence)\n#GPIO----------------------------------------------------------------------------------\n            if (id == \"Kevin\"):\n                face_control +=1\n                if(face_control == 10):\n                    print ('open door')\n                    face_control = 0\n                    loop()\n                    destroy()\n                    buttonflag = 0\n            face_control =0\n            \n#GPIO----------------------------------------------------------------------------------\n        else:\n            id = \"unknown\"\n            confidence = \" {0}%\".format(round(100 - confidence))\n\n        cv2.putText(img, str(id), (x+5,y-5), font, 1, (255,255,255), 2)\n        cv2.putText(img, str(confidence), (x+5,y+h-5), font, 1, (255,255,0), 1)  \n\n    cv2.imshow('camera',img) \n    k = cv2.waitKey(10) & 0xff # Press 'ESC' for exiting video\n    if k == 27:\n        break\n# Do a bit of cleanup\nprint(\"\\n[INFO] Exiting Program and cleanup stuff\")\ncam.release()\ncv2.destroyAllWindows()\n","repo_name":"stewedegg/Facial-recognition-door-lock","sub_path":"camera_locker.py","file_name":"camera_locker.py","file_ext":"py","file_size_in_byte":5113,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
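The map() helper in the record is plain linear interpolation; spot-checking the angle-to-duty conversion with the record's 3.0 to 13.0 duty range (renamed lin_map here to avoid shadowing the builtin):

```python
# Map a value from one range onto another by linear interpolation.
def lin_map(value, from_low, from_high, to_low, to_high):
    return (to_high - to_low) * (value - from_low) / (from_high - from_low) + to_low

print(lin_map(0, 0, 180, 3.0, 13.0))    # 3.0  -> duty cycle at 0 degrees
print(lin_map(90, 0, 180, 3.0, 13.0))   # 8.0  -> duty cycle at midpoint
```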
+{"seq_id":"11606181470","text":"import pandas as pd \nimport numpy as np \nfrom datetime import datetime, timedelta\nimport time\nimport bcolz\nimport redis\nimport pickle\nimport os, sys\nfrom dataApi import DataApi\nfrom dataGlovar import DataPath\n\n\t\t\nclass DataBcolz(object):\n\t# 行情数据模块:bcolz存储冷数据,redis缓存热数据\n\tdef __init__(self):\n\t\tself.redis_conn = redis.StrictRedis(host='127.0.0.1')\n\t\tself.dataApi = DataApi()\n\t\tself.init_params()\n\t\tself.check_today_data()\n\n\tdef init_params(self):\n\t\tself.time_list = []\n\n\t@staticmethod\n\tdef to_redis(redis_conn, now_data, security_list, now_time, prefix=''):\n\t\t# 数据字段:'date','factor','open','high','low','close','volume','high_limit','low_limit','paused'\t\t\n\t\tnow_time = prefix+str(now_time)\n\t\tredis_conn.set(now_time, pickle.dumps([now_data, security_list]))\n\t\tredis_conn.expire(now_time, 60*60*24)\n\n\t@staticmethod\n\tdef get_today_date_list():\n\t\t# 获取当天交易时间列表\n\t\tt1 = pd.to_datetime(datetime.now().strftime('%Y-%m-%d')+' 09:31:00')\n\t\tt2 = pd.to_datetime(datetime.now().strftime('%Y-%m-%d')+' 11:30:00')\n\t\tt3 = pd.to_datetime(datetime.now().strftime('%Y-%m-%d')+' 13:01:00')\n\t\tt4 = pd.to_datetime(datetime.now().strftime('%Y-%m-%d')+' 15:00:00')\n\t\tt5 = pd.to_datetime(datetime.now().strftime('%Y-%m-%d %H:%M')+':00')\n\t\tdate_list = []\n\t\tc = t1\n\t\tfor i in range(480):\n\t\t\tdate_list.append(c)\n\t\t\tc += timedelta(seconds=60)\n\t\tdate_list = [i for i in date_list if ((t1<=i<=t2 or t3<=i<=t4) and i<=t5)]\n\t\treturn date_list\n\n\t@staticmethod\n\tdef get_today_data(redis_conn, prefix=''):\n\t\t# 获取当天数据\n\t\ttoday_data = [] \n\t\ttoday_date_list = DataBcolz.get_today_date_list()\n\t\tfor now_time in today_date_list:\n\t\t\tnow_time = prefix+now_time.strftime('%Y-%m-%d %H:%M:%S')\n\t\t\tif now_time.encode('utf-8') in redis_conn.keys():\n\t\t\t\tnow_data, security_list = pickle.loads(redis_conn.get(now_time))\n\t\t\t\ttoday_data.append(now_data)\n\t\treturn np.array(today_data), list(security_list)\n\n\t@staticmethod\n\tdef get_path(security, unit='1m'):\n\t\t# 存储路径设置\t\n\t\tif [security[:3], security[-1]] in [['300','E']] or [security[0], security[-1]] in [['6','G'],['0','E']]:\n\t\t\tparam1 = 'stock'\n\t\t\tparam2 = security[4:6]\t\t\n\t\telif [security[:3], security[-1]] in [['399','E']] or [security[0], security[-1]] in [['0','G']]:\n\t\t\tparam1 = 'index'\n\t\t\tparam2 = security[4:6]\n\t\telse:\n\t\t\traise Exception('{} unknown security name type'.format(security))\n\t\tparam1 += unit\n\t\tpath = DataPath+'/{}/{}/{}'.format(param1,param2,security)\t\t\t\t\n\t\treturn path\n\n\tdef check_old_data(self):\n\t\t# 历史数据补全,盘后进行\n\t\tt1 = datetime.strptime('09:00:00', '%H:%M:%S').time()\n\t\tt2 = datetime.strptime('15:10:00', '%H:%M:%S').time()\n\t\tall_trade_days = self.dataApi.get_all_trade_days()\n\t\tall_trade_days = [i for i in all_trade_days if i<=datetime.now().date()]\n\t\tif datetime.now().time()t2 or datetime.now().date() not in all_trade_days:\n\t\t\tall_securities = self.dataApi.get_all_securities(types=['stock','index'])\t\n\t\t\tif datetime.now().time() < datetime.strptime('15:00:00', '%H:%M:%S').time() and datetime.now().date() in all_trade_days:\n\t\t\t\tend = pd.to_datetime(str(all_trade_days[-1])+' 00:00:00')\n\t\t\telse:\n\t\t\t\tend = pd.to_datetime(str(all_trade_days[-1])+' 15:00:00')\n\t\t\tfor security in all_securities:\n\t\t\t\t# to_bcolz模块会检查数据日期进行插入\n\t\t\t\tstart = pd.to_datetime(all_trade_days[-2])\n\t\t\t\tarray = self.dataApi.get_security_data(security, start, 
end)\n\t\t\t\tif len(array)>0:\n\t\t\t\t\tarray = array[:,np.newaxis,:]\n\t\t\t\t\tarray_minute, array_day, security_list = self.transform_data(array, [security])\n\t\t\t\t\tif array_minute is not None:\n\t\t\t\t\t\tself.to_bcolz(security_list, array_minute, is_day=False)\n\t\t\t\t\t\tself.to_bcolz(security_list, array_day, is_day=True)\n\t\t\t\t\t\tprint('{} {} {} {}'.format(security, start, end, array_minute.shape))\n\t\t\tprint('check old data done !')\n\t\telse:\n\t\t\tprint('only check old data after 15:10 or not in trade_days')\n\n\tdef check_today_data(self):\n\t\t# 当天数据补全,用于盘中中断\n\t\tif datetime.now().date() in self.dataApi.get_all_trade_days():\n\t\t\ttoday_date_list = self.get_today_date_list()\n\t\t\tself.time_list = []\n\t\t\tfor now_time in today_date_list:\n\t\t\t\tif now_time.strftime('%Y-%m-%d %H:%M:%S').encode('utf-8') in self.redis_conn.keys():\n\t\t\t\t\tself.time_list.append(now_time)\n\t\t\t\t\tnow_data, security_list = pickle.loads(self.redis_conn.get(now_time.strftime('%Y-%m-%d %H:%M:%S')))\n\t\t\t\telif now_time not in self.time_list:\n\t\t\t\t\tnow_data, security_list, now_time = self.dataApi.get_data(end_date=now_time)\n\t\t\t\t\tif now_data is not None:\n\t\t\t\t\t\tDataBcolz.to_redis(self.redis_conn, now_data, security_list, now_time)\n\t\t\t\t\t\tself.time_list.append(now_time)\n\t\t\t\t\t\tprint('{}'.format(now_time.strftime('%Y-%m-%d %H:%M:%S')))\n\t\telse:\n\t\t\tprint('only check today data in trade_days and before 15:30')\n\n\t@staticmethod\n\tdef transform_data(data, security_list):\n\t\t# 1分钟数据合成日级数据,转为bcolz的存储格式\n\t\t# data: 'date','factor','open','high','low','close','volume','high_limit','low_limit','paused'\n\t\t# 1m: 'date','factor','open','high','low','close','volume'\n\t\t# 1d: 'date','factor','open','high','low','close','volume','high_limit','low_limit','paused'\t\t\t\t\n\t\tdata = np.array(data)\n\t\tassert len(data.shape)==3, 'shape should be 3 dims, but got {}'.format(data.shape)\n\t\tassert (data.shape[0]%240==0 and data.shape[1]==1) or data.shape[0]==240, \\\n\t\t'shape should like (240*N,1,dims) or (240,None,dims), but got {}'.format(data.shape)\n\t\t# 去除停牌数据\n\t\tif data.shape[1]==1:\n\t\t\tindex = (data[:,:,-1]==0).any(axis=1)\n\t\t\tdata = data[index,:,:]\n\t\telse:\n\t\t\tindex = (data[:,:,-1]==0).transpose((1,0)).all(axis=1)\n\t\t\tdata = data[:,index,:]\t\n\t\t\tsecurity_list = np.array(security_list)[index]\n\t\tif len(data)==0:\n\t\t\treturn None, None, None\n\n\t\tlist_of_array = np.split(data, len(data)//240, axis=0)\n\t\tdef func(array):\n\t\t\ta1 = array[-1,:,0]\n\t\t\ta2 = array[-1,:,1]\n\t\t\ta3 = array[0,:,2]\n\t\t\ta4 = array[:,:,3].max(axis=0)\n\t\t\ta5 = array[:,:,4].min(axis=0)\n\t\t\ta6 = array[-1,:,5]\n\t\t\ta7 = array[:,:,6].sum(axis=0)\n\t\t\ta8 = array[-1,:,7]\n\t\t\ta9 = array[-1,:,8]\n\t\t\ta0 = array[-1,:,9]\n\t\t\tday_array = np.stack([a1,a2,a3,a4,a5,a6,a7,a8,a9,a0], axis=1)[np.newaxis,:,:]\t\t\t\t\n\t\t\treturn day_array\n\t\tarray_day = [func(i) for i in list_of_array]\n\t\tif len(array_day)>1:\n\t\t\tarray_day = np.concatenate(array_day, axis=0).transpose((1,2,0))\n\t\telse:\n\t\t\tarray_day = np.array(array_day[0]).transpose((1,2,0))\n\t\tarray_minute = data[:,:,:7].transpose((1,2,0))\n\t\treturn array_minute, array_day, security_list\n\n\t@staticmethod\n\tdef to_bcolz(security_list, data, is_day, target_path=None, names=None):\n\t\t# data shape (security_list, dims, length)\n\t\tfor i,security in enumerate(security_list):\n\t\t\tif is_day:\n\t\t\t\tunit = '1d'\n\t\t\t\tif names is None:\n\t\t\t\t\tnames = 
['date','factor','open','high','low','close','volume','high_limit','low_limit','paused']\n\t\t\telse:\n\t\t\t\tunit = '1m'\n\t\t\t\tif names is None:\n\t\t\t\t\tnames = ['date','factor','open','high','low','close','volume']\n\t\t\tpath = DataBcolz.get_path(security, unit) if target_path is None else target_path\n\n\t\t\tarray = data[i].astype('float')\n\t\t\tif not os.path.exists(path):\n\t\t\t\tos.makedirs(path, exist_ok=True)\n\t\t\t\ttable = bcolz.ctable(rootdir=path,\n\t\t\t\t\t\t\t\t\t columns=list(array),\n\t\t\t\t\t\t\t\t\t names=names,\n\t\t\t\t\t\t\t\t\t mode='w')\n\t\t\t\ttable.flush()\n\t\t\telse:\n\t\t\t\t# 进行数据检查\n\t\t\t\ttable = bcolz.open(path, mode='a')\t\t\t\t\t\n\t\t\t\tdate_index = table.names.index('date')\n\t\t\t\tarray = array[:,array[0,:]>table[-1][date_index]]\n\t\t\t\tarray = list(map(lambda x:tuple(x), array))\n\t\t\t\ttable.append(array)\n\t\t\t\ttable.flush()\n\n\tdef run_before_trading_start(self):\n\t\tself.dataApi.login()\t\t\n\t\tself.dataApi.get_all_trade_days()\n\t\tself.dataApi.get_all_securities(types=['stock','index'])\n\n\tdef run_after_trading_end(self):\n\t\ttoday_data,security_list = DataBcolz.get_today_data(self.redis_conn)\n\t\tif len(today_data) != 240:\n\t\t\traise Exception('wrong data length, shape {}'.format(today_data.shape))\n\t\tend_date = datetime.utcfromtimestamp(today_data[0,0,0]).date()\n\t\tif end_date != datetime.now().date():\n\t\t\traise Exception('wrong time {}'.format(end_date))\t\t\t\n\t\tarray_minute, array_day, security_list = self.transform_data(today_data, security_list)\n\t\tif array_minute is not None:\t\t\t\t\t\t\t\t\t\n\t\t\tself.to_bcolz(security_list, array_minute, is_day=False)\n\t\t\tself.to_bcolz(security_list, array_day, is_day=True)\n\t\tself.check_data()\n\t\tprint('writing today data success,please check today data !!!')\n\t\tself.init_params()\n\t\tself.dataApi.logout()\n\n\tdef check_data(self):\n\t\t# 检查是否写入成功\n\t\tpath = DataBcolz.get_path('000001.XSHG', '1d')+'/date'\n\t\tdate = datetime.utcfromtimestamp(bcolz.open(path, mode='r')[-1]).date()\n\t\tif datetime.now().date() != date:\n\t\t\traise Exception('writing today data wrong, pre date is {}'.format(date))\n\n\tdef run_every_minute(self):\n\t\tdata, security_list, now_time = self.dataApi.get_data(end_date=datetime.now())\n\t\tif now_time.date() == datetime.now().date():\n\t\t\tif now_time not in self.time_list:\n\t\t\t\tDataBcolz.to_redis(self.redis_conn, data, security_list, now_time)\n\t\t\t\tself.time_list.append(now_time)\n\t\t\t\tprint(now_time.strftime('%Y-%m-%d %H:%M:%S'))\n\t\t\t\tif len(self.time_list) != len(self.get_today_date_list()):\n\t\t\t\t\tself.check_today_data()\t\t\t\n\nif __name__ == '__main__':\n\td = DataBcolz()\n\n\n\n\n","repo_name":"hevze/Miki","sub_path":"system/data/dataBcolz.py","file_name":"dataBcolz.py","file_ext":"py","file_size_in_byte":9218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"}
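transform_data's minute-to-day roll-up is first-open/max-high/min-low/last-close/summed-volume over 240-minute blocks; the same aggregation on a toy array (the column layout here is ours, not the record's ten-field one):

```python
import numpy as np

# One trading day of 1-minute bars: columns open, high, low, close, volume.
minutes = np.random.rand(240, 5)

day = np.array([minutes[0, 0],          # open   = first minute's open
                minutes[:, 1].max(),    # high   = max of highs
                minutes[:, 2].min(),    # low    = min of lows
                minutes[-1, 3],         # close  = last minute's close
                minutes[:, 4].sum()])   # volume = sum over the day
print(day)
```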
+{"seq_id":"15106654332","text":"# -*- coding: utf-8 -*-\n# @Time : 2018-12-20 13:38\n# @Author : luomingming\n\nimport unittest\nfrom pypinyin import lazy_pinyin\nfrom FxtDataAcquisition.service import distinct\nfrom FxtDataAcquisition.repository.config_repo import ConfigRepo\nfrom FxtDataAcquisition.settings import *\n\n\nclass DistrictTestCase(unittest.TestCase):\n def setUp(self):\n self.config_repo = ConfigRepo()\n\n def test_city(self):\n print(distinct.city('深圳'))\n print(distinct.city('深圳市'))\n\n def test_cities(self):\n lst = distinct.cities()\n for city in lst:\n print(city)\n\n def test_get_all_city_name(self):\n cities = self.config_repo.get_cities(SITE_FANGTAN)\n cities = \"'\" + \"','\".join(cities) + \"'\"\n cws = distinct.get_city_weights(cities, ''.join(str(i) for i in lazy_pinyin(SITE_FANGTAN)))\n sp = cws.split(\":\")\n cities = sp[0].split(',')\n weights = sp[1].split(',')\n ws = []\n for w in weights:\n ws.append(int(w))\n print('cities: {}, weights: {}'.format(cities, ws))\n\n","repo_name":"gongfei6644/gongfei","sub_path":"DataAcquisition/test/service/district_test.py","file_name":"district_test.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"72297700695","text":"from fpdf import FPDF\nimport pandas as pd\n\ndata_frame = pd.read_csv(\"topics.csv\")\n\npdf = FPDF(orientation=\"P\", unit=\"mm\", format=\"A4\")\npdf.set_auto_page_break(auto=False, margin=0)\n\ndef create_footer(page_height):\n \"\"\"Creates a footer for your page, on a per page basis\"\"\"\n pdf.ln(page_height)\n pdf.set_font(family=\"Times\", style=\"I\", size=8)\n pdf.set_text_color(180,180,180)\n pdf.cell(w=0, h=10, txt=row[\"Topic\"], align=\"R\")\n\ndef create_page_lines():\n \"\"\"Creates verticle lines every 10mm (pdf declaration) on your page\"\"\"\n num = 0\n for i in range(26):\n num += 10\n pdf.line(x1=10, x2=200, y1=18+num, y2=17+num)\n # for i in range(20, 298, 8):\n # pdf.line(10, i, 200, i)\n\n\nfor index, row in data_frame.iterrows():\n number_of_pages = int(row[\"Pages\"])\n pdf.add_page()\n create_page_lines()\n \n #Header\n pdf.set_font(family=\"Times\", style=\"B\", size=10)\n pdf.set_text_color(66,133,244)\n pdf.cell(w=0, h=9, txt=row[\"Topic\"], align=\"L\", ln=1, border=0)\n pdf.line(x1=10, x2=200, y1=18, y2=17)\n\n #Footer\n create_footer(265)\n\n for i in range(row[\"Pages\"] - 1):\n # [pdf.add_page() for i in range(number_of_pages - 1)]\n pdf.add_page()\n\n #Footer\n create_footer(275)\n\n create_page_lines()\n \n\npdf.output(\"output.pdf\")","repo_name":"ChrisSamHarris/PDF_Python","sub_path":"file_generation/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"31214807494","text":"##|=========================================================================================================================================\n## | Created by\t\t\t: Team A04 (Dawn Schnettler, Shashank Sharma, T Sai Giridhar, Yi Qin)\n## | Date Created\t\t: 2016 04 21\n## | Purpose\t\t\t: Label Space Dimension reduction using Genetic Algorithms\n## | Name\t\t\t\t: 00 GA\n## | Date Updated\t\t: _\n## | Version\t\t : v1.0\n## | Previous Version\t: _\n##|=========================================================================================================================================\n\n\n\n\n\n#--------------------------------------------------------------------------------------------------------------\n\n\n###| Importing packages\n\nimport numpy as np\nimport pandas as pd\nimport random as rnd\nfrom sklearn import linear_model\n\n\n\n####| Importing the dataset\n\norig_train = pd.read_csv(\"C:/Users/shashank/Desktop/Capstone Final/Train_Shashank.csv\")\nprint(orig_train.info())\n\n\norig_labels = orig_train.Response\norig_Id = orig_train.Id\n\n\norig_train = orig_train.drop(['Id'], axis=1)\n\nprint(orig_train.head())\nprint(orig_train.info())\n\n\n\nResponse = orig_train['Response']\ntrain = orig_train.drop(['Response'],axis = 1)\nprint(train.info())\n\n##| Testing Regression Model\n\nclf = linear_model.LinearRegression()\nclf.fit(train,Response)\n\n\n# The mean square error\nprint(\"Residual sum of squares: %.2f\"\n % np.mean((clf.predict(train) - Response) ** 2))\n\n\nResponse1 = []\nfor i in Response:\n Response1.append(i)\n\n\nResp = []\n#--------------------------------------------------------------------------------------------------------------\n\n###| Generating initial Population\n\n##| Setting up range for generating initial population\n\n##| Generating Population\n\ndef InitPop(A = 20):\n ##| Default: Generation 0 and Population Size 20\n Pop = []\n rnge = np.arange(-200, 200, 0.5)\n for i in range(0,A,1):\n tmp = rnd.sample(range(800),8)\n flag = []\n for j in tmp:\n flag.append(round(rnge[j],3))\n Pop.append(flag)\n \n return Pop\n\n\n##| Selecting Elite Solutions (Min Residual Eror)\n\n##| Selecting top 10 % (Elites) - with minimum residual error\n\ndef PopElites(Pop = [], B = 0.1):\n ##| Default: Population 0 and % Elites 10%\n if len(Pop) == 0:\n print(\"Empty Population\")\n return None\n \n \n ResidualPop = []\n global Resp\n Elites = []\n \n for i in range(0,len(Pop),1):\n temp = []\n for j in range(0, len(Response1),1):\n t = np.asscalar(Response1[j]-1)\n temp.append(Pop[i][t-1])\n Resp.append(temp)\n \n\n ResidualPop = []\n for i in range(0,20,1):\n temp = Resp[:][i]\n clf = linear_model.LinearRegression()\n clf.fit(train,temp)\n ResidualPop.append(np.mean((clf.predict(train) - temp) ** 2))\n \n m = min(ResidualPop)\n \n L = int(len(ResidualPop)*B)\n ln = len(ResidualPop)\n for i in range(0,L,1):\n j = 0\n while (j < ln):\n if m == ResidualPop[j]:\n Elites.append(j)\n ResidualPop.pop(j)\n ln = len(ResidualPop)\n j += 1\n m = min(ResidualPop) \n\n return Elites\n \n \n \n \n \n##| Performing crossover and creating new generation of solutions for non elite population \n \ndef crossover(Pop=[], Elites = []):\n \n Rem = []\n Crossed = []\n \n for i in range(0,len(Pop)):\n if i in Elites:\n continue\n else:\n Rem.append(i)\n \n for i in range(0,len(Rem)//2,1):\n selected = rnd.sample(range(0,len(Rem),1),2)\n thr = int(round(rnd.random()*7,0))\n C1 = Pop[Rem[selected[0]]]\n C2 = Pop[Rem[selected[1]]]\n C3 = C1[:thr:1] + C2[thr::1]\n C4 = 
C2[:thr:1] + C1[thr::1]\n Crossed.append(C3)\n Crossed.append(C4)\n \n return Crossed\n \n##| Performing mutation on new generation of solutions\n \ndef mutation(Crossed = [], C= 0.1):\n \n ToMutate = []\n for i in range(0,len(Crossed),1):\n tmp = []\n for j in range(0,8,1):\n tmp.append(rnd.random())\n ToMutate.append(tmp)\n \n \n for i in range(0,len(Crossed),1):\n for j in range(0,8,1):\n if(ToMutate [i][j] <= 0.1):\n if (rnd.random() < 0.5):\n Crossed[i][j] -= (Crossed[i][j]*0.1)\n else:\n Crossed[i][j] += (Crossed[i][j]*0.1)\n \n return Crossed\n\n##| Finalizing new generation\n\ndef NewGen(Pop = [], Elites = [], Crossed = []):\n El = []\n for i in Elites:\n El.append(Pop[i][:])\n \n return (El + Crossed)\n\n\n\n \n \n \n \n \ndef GenAlgo(A = 20, B = 0.1, C = 0.1, D = 10):\n \n Soln = [] \n Pop = InitPop(A)\n \n for i in range(0,D,1):\n Elites = PopElites(Pop, B)\n Crossed = crossover(Pop, Elites)\n Crossed = mutation(Crossed, C)\n Pop = NewGen(Pop, Elites, Crossed)\n\n for i in range(0,len(Pop),1):\n temp = []\n for j in range(0, len(Response1),1):\n t = np.asscalar(Response1[j]-1)\n temp.append(Pop[i][t-1])\n Resp.append(temp)\n \n\n ResidualPop = []\n for i in range(0,20,1):\n temp = Resp[:][i]\n clf = linear_model.LinearRegression()\n clf.fit(train,temp)\n ResidualPop.append(np.mean((clf.predict(train) - temp) ** 2))\n \n m = min(ResidualPop)\n flag = -1\n for i in range(0,len(ResidualPop),1):\n if m == ResidualPop[i]:\n flag = i\n \n Soln = Pop[flag][:]\n \n return Soln\n \n\n######| Using GA for Generating the optimal solution \n \nGenAlgo(A=20, B=0.1, C=0.1, D = 10) ##| Takes ~1 Hr to execute\n\n##| Here A <- Size of initial population (default = 20)\n##| B <- % of Elites in the population (default = 10%)\n##| C <- % of Mutation in the Crossovered Solution (default = 10%)\n##| D <- Number of Evolved Generations (default = 10)\n","repo_name":"sshashan08/00-Kaggle","sub_path":"Genetic Algorithms for Label Space Dimension Reduction.py","file_name":"Genetic Algorithms for Label Space Dimension Reduction.py","file_ext":"py","file_size_in_byte":6006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
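PopElites above selects low-residual chromosomes by repeatedly popping the minimum, which also shifts the indices of the survivors; numpy's argsort expresses the same top-k selection without that pitfall (a sketch, not the original code):

```python
import numpy as np

# Pick the indices of the k chromosomes with the smallest residual error.
residuals = np.array([0.9, 0.1, 0.5, 0.3])
k = 2
elites = np.argsort(residuals)[:k]
print(elites)   # [1 3]
```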
+{"seq_id":"19685688003","text":"#import git\n#git.Git(\"D:/mongoDB\").clone(\"https://github.com/farmanAbbasi/helloWorldJenkins.git\")\nimport os\nfrom git import Repo\nrepo_dir = '/mongoDB/helloWorldJenkins'\nrepo = Repo(repo_dir)\nfile_list = [\n '/readme.txt'\n]\ncommit_message = 'Add simple regression analysis'\nrepo.index.add(file_list)\nrepo.index.commit(commit_message)\norigin = repo.remote('origin')\norigin.push()\n","repo_name":"Sanket-Tantia/helloWorldJenkins","sub_path":"gitter.py","file_name":"gitter.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"1792216953","text":"import random\r\n\r\n\"\"\"\r\n What is recursion?\r\n Recursion is where a function calls itself.\r\n \r\n Two different types of recursion:\r\n linear recursion (each function calls one other function)\r\n branching recursion (each function calls maybe multiple other copies of itself)\r\n\"\"\"\r\n\r\n\r\ndef factorial(n):\r\n \"\"\" factorial(5) = 5 * factorial(4) = 5 * 24 = 120\r\n factorial(4) = 4 * factorial(3) = 4 * 6 = 24\r\n factorial(3) = 3 * factorial(2) = 3 * 2 = 6\r\n factorial(2) = 2 * factorial(1) = 2 * 1 = 2\r\n factorial(1) = 1 * factorial(0) = 1 * 1 = 1\r\n factorial(0) = 1\r\n \"\"\"\r\n if n == 0:\r\n return 1\r\n else:\r\n # linear recursion\r\n return n * factorial(n - 1)\r\n\r\n\r\n\"\"\"\r\n iterative vs recursive\r\n \r\n loop vs recusion\r\n\"\"\"\r\n\r\n\r\ndef check_parens(the_string):\r\n level = 0\r\n for char in the_string:\r\n if char == \"(\":\r\n level += 1\r\n if char == \")\":\r\n level -= 1\r\n if level < 0:\r\n return False\r\n \r\n return level == 0\r\n \"\"\"\r\n if level == 0:\r\n return True\r\n else:\r\n return False\r\n \"\"\"\r\n\r\n\r\ndef check_parens_rec(the_string):\r\n print('now checking: ', the_string)\r\n \r\n if not the_string: # the_string == \"\"\r\n return True\r\n \r\n if the_string[0] != \"(\":\r\n return False\r\n # string not empty and the start == \"(\"\r\n # find the matching index\r\n level = 0\r\n match_location = 0\r\n for i in range(len(the_string)):\r\n if the_string[i] == \"(\":\r\n level += 1\r\n elif the_string[i] == \")\":\r\n level -= 1\r\n if level == 0 and match_location == 0:\r\n match_location = i\r\n # check both parts (start upto the match), (the match to the end)\r\n start_check = check_parens_rec(the_string[1: match_location])\r\n end_check = True\r\n if match_location < len(the_string) - 1:\r\n end_check = check_parens_rec(the_string[match_location + 1: len(the_string)])\r\n \r\n return start_check and end_check\r\n\r\n\r\n# print(check_parens_rec(input('Enter a string with parentheses: ')))\r\n\r\nPASSABLE = '_'\r\nBLOCKED = '*'\r\nTHE_GOLD = 'G'\r\nVISITED = 'v'\r\n\r\n\r\ndef make_grid(rows, cols):\r\n the_grid = []\r\n for i in range(rows):\r\n new_row = []\r\n for j in range(cols):\r\n new_row.append(random.choices([PASSABLE, BLOCKED], weights=[4, 1])[0])\r\n the_grid.append(new_row)\r\n \r\n x_gold = random.randint(0, rows - 1)\r\n y_gold = random.randint(0, cols - 1)\r\n the_grid[x_gold][y_gold] = THE_GOLD\r\n \r\n the_grid[0][0] = PASSABLE\r\n \r\n return the_grid\r\n\r\n\r\ndef display_grid(the_grid):\r\n for row in the_grid:\r\n print(' '.join(row))\r\n\r\n\r\ndef find_the_gold(current_pos, the_grid, counter):\r\n # current_pos = [y, x]\r\n y, x = current_pos\r\n if the_grid[y][x] == THE_GOLD:\r\n return [current_pos]\r\n \r\n if the_grid[y][x] != PASSABLE:\r\n return []\r\n \r\n the_grid[y][x] = str(counter)\r\n \r\n went_up = []\r\n went_right = []\r\n went_down = []\r\n went_left = []\r\n # go up\r\n if y - 1 > 0:\r\n went_up = find_the_gold([y - 1, x], the_grid, counter + 1)\r\n # go right\r\n if x + 1 < len(the_grid[y]):\r\n went_right = find_the_gold([y, x + 1], the_grid, counter + 1)\r\n # go down\r\n if y + 1 < len(the_grid):\r\n went_down = find_the_gold([y + 1, x], the_grid, counter + 1)\r\n # go left\r\n if x - 1 > 0:\r\n went_left = find_the_gold([y, x - 1], the_grid, counter + 1)\r\n \r\n if went_left:\r\n return went_left + [current_pos]\r\n if went_down:\r\n return went_down + [current_pos]\r\n if went_up:\r\n return went_up + [current_pos]\r\n if 
went_right:\r\n        return went_right + [current_pos]\r\n\r\n    # return went_down or went_up or went_right or went_left\r\n    # none of the four directions reached the gold from this cell\r\n    return []\r\n\r\n\r\nmy_grid = make_grid(10, 10)\r\ndisplay_grid(my_grid)\r\nprint(find_the_gold([0, 0], my_grid, 1))\r\ndisplay_grid(my_grid)\r\n","repo_name":"UMBC-CMSC-Hamilton/cmsc201-spring22","sub_path":"recursion/recursion_day_2.py","file_name":"recursion_day_2.py","file_ext":"py","file_size_in_byte":3983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"39421731110","text":"# coding=utf-8\nimport csv\nimport re\n\npath = 'result/all_writter.csv'\n\nwith open(path, 'r', encoding='utf-8') as f:\n reader = csv.DictReader(f)\n for row in reader:\n if row['别名'] != '':\n name_list = row['别名'].split(',')\n for name in name_list:\n if len(name) == 1:\n print(row['本名'])\n","repo_name":"peteryang1/PKU-software-implementation","sub_path":"2019/2019_QASystem/code/writter/property_test.py","file_name":"property_test.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"31424621973","text":"from sqlalchemy.orm import joinedload\n\nfrom clld import interfaces\nfrom clld.web.adapters.geojson import GeoJsonParameter\nfrom clld.db.meta import DBSession\nfrom clld.db.models.common import ValueSet, Value, DomainElement, Parameter\nfrom clld_phylogeny_plugin.interfaces import ITree\nfrom clld_phylogeny_plugin.tree import Tree\nfrom clld_glottologfamily_plugin.models import Family\nfrom clldutils.misc import lazyproperty\n\nfrom grambank import models\n\n\nclass GrambankTree(Tree):\n def __init__(self, *args, pids=None, **kw):\n self.pids = pids or []\n Tree.__init__(self, *args, **kw)\n\n @lazyproperty\n def parameters(self):\n if self.pids:\n return DBSession.query(Parameter) \\\n .filter(Parameter.id.in_(self.pids)) \\\n .options(\n joinedload(Parameter.valuesets).joinedload(ValueSet.values),\n joinedload(Parameter.domain)) \\\n .all()\n return []\n\n def get_marker(self, valueset):\n res = valueset.values[0].domainelement.jsondata['icon']\n return res[0], '#' + res[1:]\n\n\nclass GrambankGeoJsonParameter(GeoJsonParameter):\n def feature_iterator(self, ctx, req):\n de = req.params.get('domainelement')\n if de:\n query = DBSession.query(Value).join(DomainElement).filter(DomainElement.id == de)\n if ('family' in req.params) and req.params['family']:\n query = query\\\n .join(ValueSet)\\\n .join(ValueSet.language)\\\n .join(models.GrambankLanguage.family)\\\n .filter(Family.id == req.params['family'])\n return [\n v.valueset for v in query.options(\n joinedload(Value.valueset).joinedload(ValueSet.values),\n joinedload(Value.valueset, ValueSet.language))]\n return self.get_query(ctx, req)\n\n\ndef includeme(config):\n config.registry.registerUtility(GrambankTree, ITree)\n config.register_adapter(\n GrambankGeoJsonParameter,\n interfaces.IParameter,\n name=GrambankGeoJsonParameter.mimetype)\n","repo_name":"clld/grambank","sub_path":"grambank/adapters.py","file_name":"adapters.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"}
+{"seq_id":"4384169137","text":"from time import sleep\nimport ex115.lib.arquivo\n\nc = {'sem cor': '\\033[m',\n 'vermelho': '\\033[0;31m',\n 'verde': '\\033[32m',\n 'amarelo': '\\033[0;33m',\n 'azul': '\\033[0;34m',\n 'roxo': '\\033[0;30;45m',\n 'branco': '\\033[107;30m'\n }\n\n\ndef leiaint(txt):\n while True:\n try:\n num = int(input(txt))\n except (ValueError, TypeError):\n print(f'\\033[31mTivemos um problema com os valores informados\\033[m')\n except (KeyboardInterrupt):\n print(f'\\033[31mEntrada de dados interrompida pelo usuário.\\033[m')\n return 3\n else:\n return num\n\ndef linha(tam=40):\n print('-' * tam)\n\n\ndef menu():\n\n cabeçalho(\"MENU PRINCIPAL\")\n\n print(f'{c[\"amarelo\"]}1 - {c[\"azul\"]}Ver as pessoas cadastradas')\n print(f'{c[\"amarelo\"]}2 - {c[\"azul\"]}Cadastrar novas pessoas')\n print(f'{c[\"amarelo\"]}3 - {c[\"azul\"]}Sair{c[\"sem cor\"]}')\n linha()\n\n\ndef cabeçalho(txt):\n linha()\n print(txt.center(40),c['sem cor'])\n linha()\n\n\ndef lista (txt):\n #cabeçalho('Lista de Cadastros')\n ex115.lib.arquivo.lerArquivo(txt)\n sleep(1)\n\ndef cadastro(txt):\n cabeçalho('CADASTRO DE PESSOAS')\n nome=str(input('Nome: ')).strip().title()\n idade=leiaint('Idade: ')\n ex115.lib.arquivo.cadastro(txt,nome,idade)\n sleep(1)\n\n\ndef escolha(txt):\n while True:\n menu()\n opção = leiaint((f'{c[\"verde\"]}Sua Opção: {c[\"sem cor\"]}'))\n if opção == 1:\n lista(txt)\n #ex115.lib.arquivo.lerArquivo(txt)\n elif op��ão == 2:\n cadastro(txt)\n elif opção == 3:\n cabeçalho('Saindo do Sistema...Até Logo!!')\n sleep(1)\n break\n else:\n print(f'{c[\"vermelho\"]}ERRO - Opção Inválida{c[\"sem cor\"]}')\n","repo_name":"yurixf/Python","sub_path":"Exercicios/ex115/lib/interface/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"27015007332","text":"\"\"\"doubly linked list.\"\"\"\nfrom graphviz import Graph\nclass Node(object):\n\tdef __init__(self, key):\n\t\tself.key = key\n\t\tself.next = None\n\t\tself.prev = None\n\n\nclass doublylinked_list(object):\n\tdef __init__(self):\n\t\tself.Lhead = None\n\n\tdef isEmpyty(self):\n\t\treturn self.Lhead == None\n\n\tdef add(self, key):\n\t\tnode = Node(key)\n\t\tnode.next = self.Lhead\n\t\tif self.Lhead != None:\n\t\t\tself.Lhead.prev = node\n\t\tself.Lhead = node\n\t\tnode.prev = None\n\n\tdef search(self, key):\n\t\tif self.isEmpyty():\n\t\t\traise ValueError('linked list is empty')\n\t\tnode = self.Lhead\n\t\twhile node != None and node.key != key:\n\t\t\tnode = node.next\n\t\treturn node\n\n\tdef delet(self, key):\n\t\tnode = self.search(key)\n\t\tif node.prev != None:\n\t\t\tnode.prev.next = node.next\n\t\telse:\n\t\t\tself.Lhead = node.next\n\t\tif node.next != None:\n\t\t\tnode.next.prev = node.prev\n\n\tdef printLinked(self):\n\t\tdl = Graph('doublylinked', node_attr={'shape':'record'})\t\n\t\tif self.isEmpyty():\n\t\t\tpass\n\t\telif self.Lhead.next == None:\n\t\t\tdl.node(str(self.Lhead.key))\n\t\telse:\n\t\t\tself._draw(self.Lhead, dl)\n\t\tdl.render('doublylinked.gv', view=True)\n\n\tdef _draw(self, node, dl):\n\t\tif node!= None:\n\t\t\tkey = str(node.key)\n\t\t\tdl.node(key)\n\t\t\tnextNode = node.next\n\t\t\tif node.prev != None:\n\t\t\t\tprevKey = str(node.prev.key)\n\t\t\t\tdl.edge(prevKey, key)\n\t\t\tif nextNode != None:\n\t\t\t\tnextKey = str(nextNode.key)\n\t\t\t\tdl.node(nextKey)\n\t\t\t\tdl.edge(key, nextKey)\n\t\t\t\tgrandNode = nextNode.next\n\t\t\t\tself._draw(grandNode, dl)\n\ndef main():\n\tdl = doublylinked_list()\n\tL = [1, 2, 3, 5, 6, 7, 0]\n\tfor l in L:\n\t\tdl.add(l)\n\tdl.delet(5)\n\tdl.delet(6)\n\tdl.printLinked()\n\nif __name__ == '__main__':\n\tmain()\n\n","repo_name":"JiayinChen-Jen/Algorithms","sub_path":"linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"72553877032","text":"import sys\ninput = sys.stdin.readline\n\ndef Bsearch(arr,v,low,high):\n if low > high:\n return 0\n mid = (low+high) // 2\n if arr[mid] < v:\n return Bsearch(A,b,mid + 1,high)\n elif arr[mid] > v:\n return Bsearch(A,b,low,mid - 1)\n else:\n return dic.get(v)\n\nN = int(input())\nA = [int(x) for x in input().split()]\ndic = {}\nfor a in A:\n if a in dic:\n dic[a] += 1\n else:\n dic[a] = 1\n\nM = int(input())\nB = [int(x) for x in input().split()]\nA.sort()\n\nfor b in B:\n print(Bsearch(A,b,0,N - 1),end=' ')","repo_name":"jayyeong/Algorithm","sub_path":"Baekjoon/BOJproblem/BOJ10816숫자카드2.py","file_name":"BOJ10816숫자카드2.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"34648533368","text":"import numpy as np\nimport sys\nimport cv2\nfrom i3d_inception import Inception_Inflated3d\nfrom data import preprocess_input\nfrom utils import getPredictions, getTopNindecies\n\ndef readVideo(vid,clipDuration = 64): \n video = []\n clip = []\n i = 0\n cap = cv2.VideoCapture(vid)\n if (cap.isOpened()== False): \n print(\"Error opening video stream or file\")\n while(cap.isOpened()):\n ret, frame = cap.read()\n if ret == True:\n frame = preprocess_input(frame)\n clip.append(frame) \n i+=1\n if(i == clipDuration):\n video.append(clip)\n clip=[]\n i=0\n else:\n break \n\n return np.asarray(video, dtype=np.float32) \n\n\n\ndef classify(videoPath, model):\n \n kinetics_classes = [x.strip() for x in open('label_map.txt', 'r')]\n\n video = readVideo(videoPath)\n out_logits = model.predict(video, batch_size=len(video), verbose=0, steps=None)\n predictions = out_logits\n print('Top 5 predictions: ')\n final_prediction = np.zeros(len(predictions[0]))\n for pred in predictions:\n final_prediction+=pred\n final_prediction/=len(predictions)\n\n\n top5indices = getTopNindecies(final_prediction,5)\n for index in top5indices:\n print(final_prediction[index], kinetics_classes[index])\n\n\nif __name__ == \"__main__\":\n videoPath = sys.argv[1]\n rgb_model = Inception_Inflated3d(\n include_top=True,\n weights='rgb_inception_i3d',\n input_shape=(64, 224, 224, 3),\n classes=400,\n endpoint_logit=False)\n classify(videoPath,rgb_model)","repo_name":"null-void-Q/violence_detection_on_videos","sub_path":"classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"26216857671","text":"# https://leetcode.com/problems/check-if-all-characters-have-equal-number-of-occurrences/\n# 1AC\nfrom collections import defaultdict\n\nclass Solution:\n def areOccurrencesEqual(self, s: str) -> bool:\n mm = defaultdict(int)\n for c in s:\n mm[c] += 1\n st = set()\n for v in mm.values():\n st.add(v)\n return len(st) == 1\n","repo_name":"zhuli19901106/leetcode-zhuli","sub_path":"algorithms/1501-2000/1941_check-if-all-characters-have-equal-number-of-occurrences_1_AC.py","file_name":"1941_check-if-all-characters-have-equal-number-of-occurrences_1_AC.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":557,"dataset":"github-code","pt":"72"}
+{"seq_id":"913273044","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function, division\nimport os, sys, argparse, pdb, math, json, subprocess, array\nimport tmProgressBar\nimport stealthEnv, ROOT\n\nROOT.gROOT.SetBatch(ROOT.kTRUE)\nROOT.TH1.AddDirectory(ROOT.kFALSE)\n\nSTMin = 700.\nSTMax = 3500.\nSTBoundariesSourceFile = \"STRegionBoundaries_normOptimization.dat\"\n\nselection = \"singlefake\"\nidentifier = \"data\"\nyear = \"2017\"\nyearPattern = \"{y}\".format(y=year)\nif (year == \"all\"): yearPattern = \"*\"\nsourceFilePattern = stealthEnv.EOSPrefix + \"/store/user/lpcsusystealth/selections/combined_DoublePhoton_lowerSTThreshold/merged_selection_{i}_singlephoton_{yP}_control_{s}.root\".format(i=identifier, s=selection, yP=yearPattern)\ngetMCWeights = True\nif (identifier[0:4] == \"data\"): getMCWeights = False\noutputDirectory = stealthEnv.analysisRoot + \"/STDistributions_singlephoton\"\nevtSTEM_minAllowed = 200.\n\n# selection = \"control\"\n# identifier = \"MC_QCD\"\n# year = \"all\"\n# yearPattern = \"{y}\".format(y=year)\n# if (year == \"all\"): yearPattern = \"*\"\n# sourceFilePattern = stealthEnv.EOSPrefix + \"/store/user/lpcsusystealth/selections/combined_DoublePhoton_lowerSTThreshold/merged_selection_{i}_{yP}_{s}.root\".format(i=identifier, s=selection, yP=yearPattern)\n# if ((identifier == \"MC_GJet\") or (identifier == \"MC_QCD\")):\n# if (year == \"all\"):\n# sourceFilePattern = stealthEnv.EOSPrefix + \"/store/user/lpcsusystealth/selections/combined_DoublePhoton_lowerSTThreshold/merged_selection_{i}*_{s}.root\".format(i=identifier, s=selection, yP=yearPattern)\n# else:\n# sys.exit(\"ERROR: Unrecognized (year, identifier) combo: ({y}, {i})\".format(y=year, i=identifier))\n# getMCWeights = True\n# if (identifier[0:4] == \"data\"): getMCWeights = False\n# outputDirectory = stealthEnv.analysisRoot + \"/STDistributions_doublephoton\"\n# evtSTEM_minAllowed = -1.0\n\nif not(os.path.isdir(outputDirectory)): subprocess.check_call(\"mkdir -p {oD}\".format(oD=outputDirectory), shell=True, executable=\"/bin/bash\")\n\nSTRegionBoundariesFileObject = open(STBoundariesSourceFile)\nSTBoundaries = []\nfor STBoundaryString in STRegionBoundariesFileObject:\n if (STBoundaryString.strip()):\n STBoundary = float(STBoundaryString.strip())\n STBoundaries.append(STBoundary)\nSTBoundaries.append(3500)\nnSTSignalBins = len(STBoundaries) - 2 # First two lines are lower and upper boundaries for the normalization bin\nn_STBins = len(STBoundaries) - 1\n\nprint(\"Getting ST datasets for source: {sF}\".format(sF=sourceFilePattern))\n\ninputChain = ROOT.TChain(\"ggNtuplizer/EventTree\")\ninputChain.SetMaxTreeSize(100000000000) # 1 TB\ninputChain.Add(sourceFilePattern)\n\nnEntries = inputChain.GetEntries()\nprint(\"Available nEvts: {n}\".format(n=nEntries))\n\noutputFile = ROOT.TFile.Open(\"{oD}/distributions_{y}_{s}_{i}.root\".format(oD=outputDirectory, y=year, i=identifier, s=selection), \"RECREATE\")\n\nSTArrays = {}\nweightArrays = {}\nSTTrees = {}\nSTDistributions = {}\nfor nJetsBin in range(2, 7):\n STDistributions[nJetsBin] = ROOT.TH1F(\"h_ST_{n}JetsBin\".format(n=nJetsBin), \"ST distribution: {n} Jets;ST\".format(n=nJetsBin), n_STBins, array.array('d', STBoundaries))\n STDistributions[nJetsBin].Sumw2()\n STArrays[nJetsBin] = array.array('d', [0.])\n weightArrays[nJetsBin] = array.array('d', [0.])\n STTrees[nJetsBin] = ROOT.TTree(\"STTree_{nJetsBin}JetsBin\".format(nJetsBin=nJetsBin), \"STTree_{nJetsBin}JetsBin\".format(nJetsBin=nJetsBin))\n (STTrees[nJetsBin]).Branch('ST', 
(STArrays[nJetsBin]), 'ST/D')\n (STTrees[nJetsBin]).Branch('weight', (weightArrays[nJetsBin]), 'weight/D')\n\nprogressBar = tmProgressBar.tmProgressBar(nEntries)\nprogressBarUpdatePeriod = max(1, nEntries//50)\nprogressBar.initializeTimer()\nfor eventIndex in range(0, nEntries):\n if (eventIndex % progressBarUpdatePeriod == 0): progressBar.updateBar(eventIndex/nEntries, eventIndex)\n treeStatus = inputChain.LoadTree(eventIndex)\n if (treeStatus < 0):\n break\n evtStatus = inputChain.GetEntry(eventIndex)\n if (evtStatus <= 0):\n continue\n ST = inputChain.b_evtST\n nJetsDR = inputChain.b_nJetsDR\n if ((ST < STMin) or (ST > STMax)): continue\n nJetsBin = min(nJetsDR, 6)\n if (nJetsBin < 2): continue\n\n eventWeight = 1.0\n if getMCWeights: eventWeight = (inputChain.b_MCXSecWeight*inputChain.genWeight*inputChain.b_evtPrefiringWeight*inputChain.b_evtphotonMCScaleFactor*inputChain.b_PUWeightNoSelection)\n\n evtSTEM = inputChain.b_evtST_electromagnetic\n if ((evtSTEM_minAllowed > 0.) and (evtSTEM <= evtSTEM_minAllowed)): continue\n\n STBinIndex = STDistributions[nJetsBin].FindFixBin(ST)\n STBinWidth = STDistributions[nJetsBin].GetXaxis().GetBinUpEdge(STBinIndex) - STDistributions[nJetsBin].GetXaxis().GetBinLowEdge(STBinIndex)\n eventWeight_histograms = eventWeight/STBinWidth\n\n STDistributions[nJetsBin].Fill(ST, eventWeight_histograms)\n (STArrays[nJetsBin])[0] = ST\n (weightArrays[nJetsBin])[0] = 1.0\n if getMCWeights:\n (weightArrays[nJetsBin])[0] = (inputChain.b_MCXSecWeight*inputChain.genWeight*inputChain.b_evtPrefiringWeight*inputChain.b_evtphotonMCScaleFactor*inputChain.b_PUWeightNoSelection)\n (STTrees[nJetsBin]).Fill()\n\nprint()\nfor nJetsBin in range(2, 7):\n outputFile.WriteTObject(STDistributions[nJetsBin])\n outputFile.WriteTObject(STTrees[nJetsBin])\n\noutputFile.Close()\nprint(\"Output file written, path: {oD}/distributions_{y}_{s}_{i}.root\".format(oD=outputDirectory, y=year, i=identifier, s=selection))\n\nprint(\"Done!\")\n","repo_name":"tanmaymudholkar/STEALTH","sub_path":"miscUtils/saveSTDistributions.py","file_name":"saveSTDistributions.py","file_ext":"py","file_size_in_byte":5447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"}
+{"seq_id":"14072989679","text":"from socket import *\nimport time\n\nclientSocket = socket(AF_INET, SOCK_DGRAM)\nclientSocket.connect(('127.0.0.1', 12000))\nclientSocket.settimeout(1)\n\nfor x in range(9):\n message = \"Testando\"\n msg = message.encode()\n pingStart = time.time()\n address = clientSocket.send(msg)\n try:\n responseServer = clientSocket.recvfrom(1024)\n except timeout:\n print(\"Tempo limite para conexão\")\n continue\n pingEnd = time.time()\n if message != '':\n print(message)\n rtt = pingEnd - pingStart\n print(\"Resposta do ping: \", rtt) \nclientSocket.close()\n","repo_name":"BadMickey/Redes-IFG","sub_path":"Ping UDP/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"34711519806","text":"import information\nimport socket\nfrom dataTypes import *\nfrom noiseEncryption import *\nimport hashlib\nimport time\n\nclass Client:\n\n def __init__(self,serverHostname):\n self.serverHostname=serverHostname\n self.com_socket=None\n\n\n self.client = Noise()\n\n self.client.connectToNoise(\"localhost\", 6222)\n\n\n def SetupConnection(self):\n protocol = U8(information.protocol[\"Mining Protocol\"])\n min_version = U16(information.min_version)\n max_version = U16(information.max_version)\n flags = U32(information.flags)\n endpoint_host = STR0_255(information.endpoint_host)\n endpoint_port = U16(information.endpoint_port)\n vendor = STR0_255(information.vendor)\n hardware_version = STR0_255((information.hardware_version))\n firmware = STR0_255(information.firmware)\n device_id = STR0_255(information.device_id)\n\n payload = protocol+min_version+max_version+flags+endpoint_host+endpoint_port+vendor+hardware_version+firmware+device_id\n frame = FRAME(0x0abc,\"SetupConnection\",payload)\n\n self.client.sendNoiseFrame(frame)\n\n frame = self.client.receiveNoiseFrame()\n msg_type = frame[2]\n\n if msg_type == 0x01:\n print(\"setup success\")\n return True\n elif msg_type ==0x02:\n print(\"setup error\")\n self.client.closeNoiseConnection()\n return False\n else:\n print(\"bad msg type\")\n\n def OpenStandartMiningChannel(self):\n\n unique = str(time.time())\n unique = hashlib.sha256(unique.encode()).hexdigest()\n request_id = U32(int(unique[:8],16))\n\n #print(int(unique[:8],16))\n\n #print(request_id)\n\n user_identity = STR0_255(information.username)\n\n nominal_hash_rate = U32(information.hash_rate)\n\n max_target = U256(information.max_target)\n\n payload = request_id+user_identity+nominal_hash_rate+max_target\n\n frame = FRAME(0,\"OpenStandardMiningChannel\",payload)\n\n self.client.sendNoiseFrame(frame)\n\n frame_server = self.client.receiveNoiseFrame()\n\n msg_type = frame_server[2]\n\n if msg_type == 0x11:\n\n\n request_id_server = parse_bytes_to_int(frame_server,6,10)\n #print(request_id_server)\n\n channel_id = int(parse_bytes_to_int(frame_server,10,14))\n #print(channel_id)\n\n print(\"open standard channel success. Channel id:\", channel_id)\n\n return channel_id\n elif msg_type == 0x12:\n length = frame_server[11]\n error_code = frame_server[11:12+length]\n print(\"open standard channel error: \"+error_code.decode())\n\n return\n else:\n print(\"bad msg type\")\n\n\n\n\n\n\n\n def MiningProtocolSetup(self):\n setup = self.SetupConnection()\n if setup:\n channel_id = self.OpenStandartMiningChannel()\n\n if channel_id:\n\n pass\n\n\n\n\n\n \"\"\"s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print(self.serverHostname, information.endpoint_port)\n\n s.connect((self.serverHostname,information.endpoint_port))\n\n\n #print(\"tcp connection failed.\")\n\n setupMsg = self.SetupMessage()\n\n print(\"setup msg\", setupMsg)\n\n\n s.send(setupMsg)\n\n response = s.recv(1024)\n\n print(\"resp\",response)\n\n\n s.settimeout(1)\n\n try:\n response = s.recv(1024)\n\n except socket.timeout:\n self.com_socket=s\n print(\"Setup ok.\")\n\n\n\n def OpenStandardMiningChannel(self):\n\n if not self.com_socket:\n print(\"Setup connection must be done first.\")\n return\n else:\n self.SetupMiningConnection()\"\"\"\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"stratumv2/stratumv2","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3774,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"8853512827","text":"#!/usr/bin/python\n\nimport socket\nimport struct\nimport sys\n\ndef send(message, ip_target, port_target):\n sckt = socket.socket()\n sckt.connect((str(ip_target), int(port_target)))\n sckt.send(message)\n respuesta = sckt.recv(4096)\n sckt.close()\n return str(respuesta)\n\n\ndef recive(ip_source, port_source):\n # Create a TCP/IP socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n # Bind the socket to the port\n server_address = (str(ip_source), int(port_source))\n sock.bind(server_address)\n # Listen for incoming connections\n sock.listen(1)\n while True:\n # Wait for a connection\n connection, client_address = sock.accept()\n try:\n # Receive the data in small chunks and retransmit it\n while True:\n data = connection.recv(4096)\n if data:\n connection.sendall('ACK!')\n connection.close()\n sock.close()\n return str(data)\n else:\n connection.close()\n sock.close()\n break\n finally:\n # Clean up the connection\n connection.close()\n sock.close()","repo_name":"Sergiogonzalezpi/shrek-tor","sub_path":"shrek_protocols/protocol_sendrecv.py","file_name":"protocol_sendrecv.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"70922273514","text":"from bs4 import BeautifulSoup\r\nimport os,requests\r\nfilename_1=input('请输入要读取的字典名?\\n')\r\nfilename_2=filename_1 + '.txt'\r\nf=open(filename_2)\r\nurls=[]\r\nline=f.readline()\r\nwhile line:\r\n\tn_tmp=len(line)-1\r\n\turls.append(line[:n_tmp])\r\n\tline = f.readline()\r\nf.close()\r\nprint(urls)\r\nn=0\r\nurls_len=len(urls)\r\nfor url in urls:\r\n\tn+=1\r\n\tr1=requests.get(url,'lxml')\r\n\tsoup=BeautifulSoup(r1.text,'lxml')\r\n\tT=soup.find_all('p')\r\n\tprint(\"进度:\"+str(n)+'/'+str(urls_len))\r\n\tfor i in T:\r\n\t\toutfile=filename_1+'_resu.txt'\r\n\t\tfo=open(outfile,'a',encoding='utf-8')\r\n\t\tfo.write(i.text)\r\n\t\tfo.close()\r\n\r\n\r\n\r\n","repo_name":"yinyang-Aiden/johnorwell_AiResearch","sub_path":"code/the_spider_For_text.py","file_name":"the_spider_For_text.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"14439502175","text":"def main():\r\n fin = open('words.txt')\r\n for i in fin:\r\n riga = fin.readline()\r\n esito=controllo(riga)\r\n if(esito==True):\r\n print(riga)\r\n\r\ndef controllo(parola):\r\n cont=0\r\n i=0\r\n while i = maxvalue ):\n return False\n \n if not isValidBST(root.left, minvalue, root.val) or not isValidBST( root.right, root.val, maxvalue ):\n return False\n \n return True\n\n# Create a root node\nroot = Node(5)\nroot.left = Node(3)\nroot.right = Node(8)\nprint(validateBST(root))\n","repo_name":"Prashant47/algorithms","sub_path":"tree/98_validate_bst.py","file_name":"98_validate_bst.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"31303357062","text":"import numpy as np\nimport time\nimport tensorflow as tf\nfrom sklearn.metrics import accuracy_score\n\nfrom model.sequential import SequentialModel\nfrom model.layers.layer import Layer\nfrom model.layers.convolutional import Convolutional\nfrom model.layers.dense import Dense\nfrom model.layers.flatten import Flatten\nfrom model.layers.pooling import Pooling\n\nnp.random.seed(13517)\n\ndef load_images_as_dataset(directory, image_size, batch_size, rescale=True) :\n test_dataset = tf.keras.preprocessing.image_dataset_from_directory(\n directory,\n labels='inferred',\n label_mode='int',\n class_names=['dogs', 'cats'],\n batch_size=batch_size,\n image_size=image_size\n )\n rescale_factor = 1.0/255 if rescale else 1\n\n list_images = []\n list_labels = []\n for images, labels in test_dataset.take(1) :\n for i in range(len(images)) :\n list_images.append(images[i].numpy().transpose(2, 0, 1) * rescale_factor)\n list_labels.append(labels[i].numpy())\n return list_images, list_labels\n\nif __name__ == \"__main__\" :\n IMG_DIR_TEST = '../data/test'\n IMG_DIR_TRAINING = '../data/train'\n IMAGE_SIZE = (150, 150)\n BATCH_SIZE = 15\n\n # Prepare dataset\n list_test_images, list_test_labels = load_images_as_dataset(IMG_DIR_TEST, IMAGE_SIZE, BATCH_SIZE)\n list_train_images, list_train_labels = load_images_as_dataset(IMG_DIR_TRAINING, IMAGE_SIZE, BATCH_SIZE)\n\n # Define models\n model1 = SequentialModel([\n Convolutional(4, (3, 3), (150, 150, 3), 0, 1),\n Pooling((2, 2), 1),\n Convolutional(8, (3, 3)),\n Pooling((2, 2), 1),\n Flatten(),\n Dense(256, 'relu'),\n Dense(1, 'sigmoid')\n ])\n print(list_test_labels)\n # # model2 = SequentialModel([\n # # Convolutional(16, (3, 3), (150, 150, 3)),\n # # Pooling((2, 2), 2),\n # # Convolutional(32, (3, 3)),\n # # Pooling((2, 2), 2),\n # # Convolutional(64, (3, 3)),\n # # Pooling((2, 2), 2),\n # # Flatten(),\n # # Dense(512, 'relu'),\n # # Dense(1, 'sigmoid')\n # # ])\n\n # # Load model\n # # model1.load_model_from_json('testing.json')\n\n # # List of predicted labels by model\n list_predicted = []\n\n # model1.fit2(list_images[1:2], list_labels[1:2])\n model1.fit(list_train_images, list_train_labels, batch_size=5)\n\n # # Predict using defined models\n print('\\n================')\n print('Predict')\n print('================')\n list_predicted = model1.predict(np.array(list_test_images), np.array(list_test_labels), True)\n print('Model accuracy:', accuracy_score(list_test_labels, list_predicted))\n\n # Save model\n model1.save_model_as_json('model1.json')","repo_name":"ahmadnaufalhakim/CNN-MLL","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"3216496869","text":"from __future__ import absolute_import\n\nfrom sqlalchemy.orm import joinedload\n\nfrom changes.api.base import APIView\nfrom changes.models.artifact import Artifact\nfrom changes.models.job import Job\n\n\nclass JobArtifactIndexAPIView(APIView):\n def get(self, job_id):\n job = Job.query.get(job_id)\n if job is None:\n return '', 404\n\n queryset = Artifact.query.options(\n joinedload('step')\n ).filter(\n Artifact.job_id == job.id,\n ).order_by(\n Artifact.name.asc(),\n )\n\n return self.paginate(queryset)\n","repo_name":"dropbox/changes","sub_path":"changes/api/job_artifact_index.py","file_name":"job_artifact_index.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":758,"dataset":"github-code","pt":"72"}
+{"seq_id":"10204586852","text":"import collections\nimport os\nimport zlib\n\nimport ctranslate2\nimport numpy as np\nimport tokenizers\nfrom faster_whisper.audio import decode_audio\nfrom faster_whisper.feature_extractor import FeatureExtractor\n\n\nclass Segment(collections.namedtuple(\"Segment\", (\"start\", \"end\", \"text\"))):\n pass\n\n\nclass AudioInfo(\n collections.namedtuple(\"AudioInfo\", (\"language\", \"language_probability\"))\n):\n pass\n\n\nclass TranscriptionOptions(\n collections.namedtuple(\n \"TranscriptionOptions\",\n (\n \"beam_size\",\n \"best_of\",\n \"patience\",\n \"log_prob_threshold\",\n \"no_speech_threshold\",\n \"compression_ratio_threshold\",\n \"condition_on_previous_text\",\n \"temperatures\",\n ),\n )\n):\n pass\n\n\nclass WhisperModel:\n def __init__(\n self,\n model_path,\n device=\"auto\",\n compute_type=\"default\",\n cpu_threads=0,\n ):\n \"\"\"Initializes the Whisper model.\n\n Args:\n model_path: Path to the converted model.\n device: Device to use for computation (\"cpu\", \"cuda\", \"auto\").\n compute_type: Type to use for computation.\n See https://opennmt.net/CTranslate2/quantization.html.\n cpu_threads: Number of threads to use when running on CPU (4 by default).\n A non zero value overrides the OMP_NUM_THREADS environment variable.\n \"\"\"\n self.model = ctranslate2.models.Whisper(\n model_path,\n device=device,\n compute_type=compute_type,\n intra_threads=cpu_threads,\n )\n\n self.feature_extractor = FeatureExtractor()\n self.decoder = tokenizers.decoders.ByteLevel()\n\n with open(os.path.join(model_path, \"vocabulary.txt\")) as vocab_file:\n self.ids_to_tokens = [line.rstrip(\"\\n\") for line in vocab_file]\n self.tokens_to_ids = {\n token: i for i, token in enumerate(self.ids_to_tokens)\n }\n\n self.eot_id = self.tokens_to_ids[\"<|endoftext|>\"]\n self.timestamp_begin_id = self.tokens_to_ids[\"<|notimestamps|>\"] + 1\n self.input_stride = 2\n self.time_precision = 0.02\n self.max_length = 448\n\n def transcribe(\n self,\n input_file,\n language=None,\n beam_size=5,\n best_of=5,\n patience=1,\n temperature=[0.0, 0.2, 0.4, 0.6, 0.8, 1.0],\n compression_ratio_threshold=2.4,\n log_prob_threshold=-1.0,\n no_speech_threshold=0.6,\n condition_on_previous_text=True,\n ):\n \"\"\"Transcribes an input file.\n\n Arguments:\n input_file: Path to the input file or a file-like object.\n language: The language spoken in the audio. If not set, the language will be\n detected in the first 30 seconds of audio.\n beam_size: Beam size to use for decoding.\n best_of: Number of candidates when sampling with non-zero temperature.\n patience: Beam search patience factor.\n temperature: Temperature for sampling. 
It can be a tuple of temperatures,\n which will be successively used upon failures according to either\n `compression_ratio_threshold` or `logprob_threshold`.\n compression_ratio_threshold: If the gzip compression ratio is above this value,\n treat as failed.\n log_prob_threshold: If the average log probability over sampled tokens is\n below this value, treat as failed.\n no_speech_threshold: If the no_speech probability is higher than this value AND\n the average log probability over sampled tokens is below `logprob_threshold`,\n consider the segment as silent.\n condition_on_previous_text: If True, the previous output of the model is provided\n as a prompt for the next window; disabling may make the text inconsistent across\n windows, but the model becomes less prone to getting stuck in a failure loop,\n such as repetition looping or timestamps going out of sync.\n\n Returns:\n A tuple with:\n\n - a generator over transcribed segments\n - an instance of AudioInfo\n \"\"\"\n audio = decode_audio(\n input_file, sampling_rate=self.feature_extractor.sampling_rate\n )\n features = self.feature_extractor(audio)\n\n if language is None:\n segment = self.get_segment(features)\n input = self.get_input(segment)\n results = self.model.detect_language(input)\n language_token, language_probability = results[0][0]\n language = language_token[2:-2]\n else:\n language_probability = 1\n\n options = TranscriptionOptions(\n beam_size=beam_size,\n best_of=best_of,\n patience=patience,\n log_prob_threshold=log_prob_threshold,\n no_speech_threshold=no_speech_threshold,\n compression_ratio_threshold=compression_ratio_threshold,\n condition_on_previous_text=condition_on_previous_text,\n temperatures=(\n temperature if isinstance(temperature, (list, tuple)) else [temperature]\n ),\n )\n\n segments = self.generate_segments(features, language, options)\n\n audio_info = AudioInfo(\n language=language,\n language_probability=language_probability,\n )\n\n return segments, audio_info\n\n def generate_segments(self, features, language, options):\n tokenized_segments = self.generate_tokenized_segments(\n features, language, options\n )\n\n for start, end, tokens in tokenized_segments:\n text = self.decode_text_tokens(tokens)\n if not text.strip():\n continue\n\n yield Segment(\n start=start,\n end=end,\n text=text,\n )\n\n def generate_tokenized_segments(self, features, language, options):\n num_frames = features.shape[-1]\n offset = 0\n all_tokens = []\n prompt_reset_since = 0\n\n while offset < num_frames:\n time_offset = offset * self.feature_extractor.time_per_frame\n segment = self.get_segment(features, offset)\n segment_duration = segment.shape[-1] * self.feature_extractor.time_per_frame\n\n previous_tokens = all_tokens[prompt_reset_since:]\n prompt = self.get_prompt(language, previous_tokens)\n result, temperature = self.generate_with_fallback(segment, prompt, options)\n\n if (\n result.no_speech_prob > options.no_speech_threshold\n and result.scores[0] < options.log_prob_threshold\n ):\n offset += segment.shape[-1]\n continue\n\n tokens = result.sequences_ids[0]\n\n consecutive_timestamps = [\n i\n for i in range(len(tokens))\n if i > 0\n and tokens[i] >= self.timestamp_begin_id\n and tokens[i - 1] >= self.timestamp_begin_id\n ]\n\n if len(consecutive_timestamps) > 0:\n last_slice = 0\n for i, current_slice in enumerate(consecutive_timestamps):\n sliced_tokens = tokens[last_slice:current_slice]\n start_timestamp_position = (\n sliced_tokens[0] - self.timestamp_begin_id\n )\n end_timestamp_position = 
sliced_tokens[-1] - self.timestamp_begin_id\n start_time = (\n time_offset + start_timestamp_position * self.time_precision\n )\n end_time = (\n time_offset + end_timestamp_position * self.time_precision\n )\n\n last_in_window = i + 1 == len(consecutive_timestamps)\n\n # Include the last timestamp so that all tokens are included in a segment.\n if last_in_window:\n sliced_tokens.append(tokens[current_slice])\n\n yield start_time, end_time, sliced_tokens\n last_slice = current_slice\n\n last_timestamp_position = (\n tokens[last_slice - 1] - self.timestamp_begin_id\n )\n offset += last_timestamp_position * self.input_stride\n all_tokens.extend(tokens[: last_slice + 1])\n\n else:\n duration = segment_duration\n timestamps = [\n token for token in tokens if token >= self.timestamp_begin_id\n ]\n if len(timestamps) > 0 and timestamps[-1] != self.timestamp_begin_id:\n last_timestamp_position = timestamps[-1] - self.timestamp_begin_id\n duration = last_timestamp_position * self.time_precision\n\n yield time_offset, time_offset + duration, tokens\n\n offset += segment.shape[-1]\n all_tokens.extend(tokens)\n\n if not options.condition_on_previous_text or temperature > 0.5:\n prompt_reset_since = len(all_tokens)\n\n def decode_text_tokens(self, tokens):\n text_tokens = [\n self.ids_to_tokens[token] for token in tokens if token < self.eot_id\n ]\n\n return self.decoder.decode(text_tokens)\n\n def generate_with_fallback(self, segment, prompt, options):\n features = self.get_input(segment)\n result = None\n final_temperature = None\n\n for temperature in options.temperatures:\n if temperature > 0:\n kwargs = {\n \"beam_size\": 1,\n \"num_hypotheses\": options.best_of,\n \"sampling_topk\": 0,\n \"sampling_temperature\": temperature,\n }\n else:\n kwargs = {\n \"beam_size\": options.beam_size,\n \"patience\": options.patience,\n }\n\n final_temperature = temperature\n result = self.model.generate(\n features,\n [prompt],\n max_length=self.max_length,\n return_scores=True,\n return_no_speech_prob=True,\n **kwargs,\n )[0]\n\n tokens = result.sequences_ids[0]\n text = self.decode_text_tokens(tokens)\n compression_ratio = get_compression_ratio(text)\n\n if (\n compression_ratio <= options.compression_ratio_threshold\n and result.scores[0] >= options.log_prob_threshold\n ):\n break\n\n return result, final_temperature\n\n def get_prompt(self, language, previous_tokens):\n prompt = []\n\n if previous_tokens:\n prompt.append(self.tokens_to_ids[\"<|startofprev|>\"])\n prompt.extend(previous_tokens[-(self.max_length // 2 - 1) :])\n\n prompt += [\n self.tokens_to_ids[\"<|startoftranscript|>\"],\n self.tokens_to_ids[\"<|%s|>\" % language],\n self.tokens_to_ids[\"<|transcribe|>\"],\n ]\n\n return prompt\n\n def get_segment(self, features, offset=0):\n if offset > 0:\n features = features[:, offset:]\n\n num_frames = features.shape[-1]\n required_num_frames = self.feature_extractor.nb_max_frames\n\n if num_frames > required_num_frames:\n features = features[:, :required_num_frames]\n elif num_frames < required_num_frames:\n pad_widths = [(0, 0), (0, required_num_frames - num_frames)]\n features = np.pad(features, pad_widths)\n\n features = np.ascontiguousarray(features)\n return features\n\n def get_input(self, segment):\n segment = np.expand_dims(segment, 0)\n segment = ctranslate2.StorageView.from_array(segment)\n return segment\n\n\ndef get_compression_ratio(text):\n text_bytes = text.encode(\"utf-8\")\n return len(text_bytes) / 
len(zlib.compress(text_bytes))\n","repo_name":"rhasspy/rhasspy3","sub_path":"programs/asr/faster-whisper/src/faster_whisper/transcribe.py","file_name":"transcribe.py","file_ext":"py","file_size_in_byte":12116,"program_lang":"python","lang":"en","doc_type":"code","stars":189,"dataset":"github-code","pt":"72"}
+{"seq_id":"8810448524","text":"'''\n1/20\n문제 난이도: Gold 4\n문제 유형: 이진 탐색\n추천 풀이 시간: 60분\n내용: 공장 ~ 공장까지 가는 길들에 무게제한이 있다.\n 물건을 옮길때 무게를 최대로 싣고 갈 수 있는 길이 있다.\n 그 최대무게는?\n'''\n# 다리 개수 100,000 중량제한 1,000,000,000\n# log나 루트를 씌워줄만한 껀덕지가 있지않을까.\n# 찾고자 하는 값에 대해 이진탐색을 해야함.(중량에 이진탐색을 한다.)\n# BFS를 이용해서 a -> b로 이동\n# 간선의 개수 m만큼 시간이 걸림. O(m)\n# O(M * logC)하면 약 300만이 걸림. 금방이네.\n\n# 일단 공장하나의 최소중량 최대중량을 정해서 그거로 start end놓고 이진탐색\n\n###################### ######################\n# 나동빈\nfrom collections import deque\n\nn, m = map(int, input().split(' '))\nadj = [[] for _ in range(n+1)]\n\ndef bfs(c):\n queue = deque([start_node]) # deque = [start]\n visited = [False] * (n+1)\n visited[start_node] = True\n\n while queue:\n x = queue.popleft() # 시작노드를 queue에서 제거\n for y, weight in adj[x]:\n if not visited[y] and weight >= c: # 안 가봤고, c무게보다 크다면\n visited[y] = True\n queue.append(y)\n return visited[end_node]\n\nstart = 1000000000\nend = 1\n\nfor _ in range(m):\n x, y, weight = map(int, input().split(' '))\n adj[x].append((y,weight))\n adj[y].append((x,weight))\n start = min(start, weight)\n end = max(end, weight)\n\nstart_node, end_node = map(int, input().split(' '))\n\nresult = start\nwhile (start <= end):\n mid = (start + end) // 2 # mid는 현재 중량을 의미\n if bfs(mid): # 이동 가능하므로, 중량을 증가시킴\n result = mid\n start = mid + 1\n else: # 이동 불가능하므로, 중량을 감소시킴\n end = mid - 1\n\nprint(result)","repo_name":"Minsik113/Algorithm-practice","sub_path":"문제풀이/03_탐색/09_1939_중량제한.py","file_name":"09_1939_중량제한.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"2493596848","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 13 12:41:06 2020\n\n@author: abdul\n\"\"\"\n\nfrom random import randint\nimport time\n\nWIDTH = 800\nHEIGHT = 600\nCENTER_X = WIDTH / 2\nCENTER_Y = HEIGHT / 2\n\ngame_over = False\nfinalised = False\n\ngarden_happy = True\nfangflower_collision = False\n\ntime_elapsed = 0\nstart_time = time.time()\n\ncow = Actor(\"cow\")\ncow.pos = 100, 500\n\nflower_list = []\nwilted_list = []\nfangflower_list = []\nfangflower_vy_list = []\nfangflower_vx_list = []\n\ndef draw():\n global game_over, time_elapsed, finalized\n if not game_over:\n screen.clear()\n screen.blit(\"garden\", (0, 0))\n cow.draw()\n for flower in flower_list:\n flower.draw()\n for fangflower in fangflower_list:\n fangflower.draw()\n time_elapsed = int(time.time() - start_time)\n screen.draw.text(\n \"Garden happy for: \" +\n str(time_elapsed) + \" seconds\",\n topleft=(10, 10), color=\"black\"\n )\n else:\n if not finalized:\n cow.draw()\n screen.draw.text(\n \"Garden happy for: \" +\n str(time_elapsed) + \" seconds\",\n topleft=(10, 10), color=\"black\"\n )\n if (not garden_happy):\n screen.draw.text(\n \"GARDEN UNHAPPY - GAME OVER!\", color=\"black\",\n topleft=(10, 50)\n )\n finalized = True\n else:\n screen.draw.text(\"FANGFLOWER ATTACK - GAME OVER!\", color=\"black\",\n topleft=(10, 50)\n )\n finalized = True\n return\n\ndef new_flower():\n global flower_list, wilted_list\n flower_new = Actor(\"flower\")\n flower_new.pos = randint(50, WIDTH - 50), randint(150, HEIGHT - 100)\n flower_list.append(flower_new)\n wilted_list.append(\"happy\")\n return\n\ndef add_flowers():\n global game_over\n if not game_over:\n new_flower()\n clock.schedule(add_flowers, 4)\n return\n\ndef check_wilt_times():\n global wilted_list, game_over, garden_happy\n if wilted_list:\n for wilted_since in wilted_list:\n if (not wilted_since == \"happy\"):\n time_wilted = int(time.time() - wilted_since)\n if (time_wilted) > 10.0:\n garden_happy = False\n game_over = True\n break\n return\n\ndef wilt_flower():\n global flower_list, wilted_list, game_over\n if not game_over:\n if flower_list:\n rand_flower = randint(0, len(flower_list) - 1)\n if (flower_list[rand_flower].image == \"flower\"):\n flower_list[rand_flower].image = \"flower-wilt\"\n wilted_list[rand_flower] = time.time()\n clock.schedule(wilt_flower, 3)\n return\n\ndef check_flower_collision():\n global cow, flower_list, wilted_list\n index = 0\n for flower in flower_list:\n if (flower.colliderect(cow) and\n flower.image == \"flower-wilt\"):\n flower.image = \"flower\"\n wilted_list[index] = \"happy\"\n break\n index = index + 1\n return\n\ndef check_fangflower_collision():\n global cow, fangflower_list, fangflower_collision\n global game_over\n for fangflower in fangflower_list:\n if fangflower.colliderect(cow):\n cow.image = \"zap\"\n game_over = True\n break\n return\n\ndef velocity():\n random_dir = randint(0, 1)\n random_velocity = randint(2, 3)\n if random_dir == 0:\n return -random_velocity\n else:\n return random_velocity\n \ndef mutate():\n global flower_list, fangflower_list, fangflower_vy_list\n global fangflower_vx_list, game_over\n if not game_over and flower_list:\n rand_flower = randint(0, len(flower_list) - 1)\n fangflower_pos_x = flower_list[rand_flower].x\n fangflower_pos_y = flower_list[rand_flower].y\n del flower_list[rand_flower]\n fangflower = Actor(\"fangflower\")\n fangflower.pos = fangflower_pos_x, fangflower_pos_y\n fangflower_vx = velocity()\n fangflower_vy = velocity()\n fangflower = 
fangflower_list.append(fangflower)\n fangflower_vx_list.append(fangflower_vx)\n fangflower_vy_list.append(fangflower_vy)\n clock.schedule(mutate, 20)\n return \n\ndef update_fangflowers():\n global fangflower_list, game_over\n if not game_over:\n index = 0\n for fangflower in fangflower_list:\n fangflower_vx = fangflower_vx_list[index]\n fangflower_vy = fangflower_vy_list[index]\n fangflower.x = fangflower.x + fangflower_vx\n fangflower.y = fangflower.y + fangflower_vy\n if fangflower.left < 0:\n fangflower_vx_list[index] = -fangflower_vx\n if fangflower.right > WIDTH:\n fangflower_vx_list[index] = -fangflower_vx\n if fangflower.top < 150:\n fangflower_vy_list[index] = -fangflower_vy\n if fangflower.bottom > HEIGHT:\n fangflower_vy_list[index] = -fangflower_vy\n index = index + 1\n return\n\ndef reset_cow():\n global game_over\n if not game_over:\n cow.image = \"cow\"\n return\n\nadd_flowers()\nwilt_flower()\n\ndef update():\n global score, game_over, fangflower_collision\n global flower_list, fangflower_list, time_elapsed\n fangflower_collision = check_fangflower_collision()\n check_wilt_times()\n if not game_over:\n if keyboard.space:\n cow.image = \"cow-water\"\n clock.schedule(reset_cow, 0.5)\n check_flower_collision()\n if keyboard.left and cow.x > 0:\n cow.x -= 5\n elif keyboard.right and cow.x < WIDTH:\n cow.x += 5\n elif keyboard.up and cow.y > 150:\n cow.y -= 5\n elif keyboard.down and cow.y < HEIGHT:\n cow.y += 5\n if time_elapsed > 15 and not fangflower_list:\n mutate()\n update_fangflowers()\n \n","repo_name":"munnawarms/python-scripts","sub_path":"happy-garden/garden.py","file_name":"garden.py","file_ext":"py","file_size_in_byte":6083,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"17067686064","text":"import django_tables2 as tables\nfrom django_tables2.utils import Accessor\n\nfrom dcim.models import Interface\nfrom tenancy.tables import COL_TENANT\nfrom utilities.tables import BaseTable, BooleanColumn, ToggleColumn\nfrom .models import Aggregate, IPAddress, Prefix, RIR, Role, Service, VLAN, VLANGroup, VRF\n\nRIR_UTILIZATION = \"\"\"\n\n {% if record.stats.total %}\n
\n {{ record.stats.percentages.active }}%\n
\n
\n {{ record.stats.percentages.reserved }}%\n
\n
\n {{ record.stats.percentages.deprecated }}%\n
\n
\n {{ record.stats.percentages.available }}%\n
\n {% endif %}\n
\n\"\"\"\n\nRIR_ACTIONS = \"\"\"\n\n \n\n{% if perms.ipam.change_rir %}\n \n{% endif %}\n\"\"\"\n\nUTILIZATION_GRAPH = \"\"\"\n{% load helpers %}\n{% if record.pk %}{% utilization_graph record.get_utilization %}{% else %}—{% endif %}\n\"\"\"\n\nROLE_PREFIX_COUNT = \"\"\"\n{{ value }}\n\"\"\"\n\nROLE_VLAN_COUNT = \"\"\"\n{{ value }}\n\"\"\"\n\nROLE_ACTIONS = \"\"\"\n\n \n\n{% if perms.ipam.change_role %}\n \n{% endif %}\n\"\"\"\n\nPREFIX_LINK = \"\"\"\n{% if record.has_children %}\n \n{% else %}\n \n{% endif %}\n {{ record.prefix }}\n\n\"\"\"\n\nPREFIX_ROLE_LINK = \"\"\"\n{% if record.role %}\n {{ record.role }}\n{% else %}\n —\n{% endif %}\n\"\"\"\n\nIPADDRESS_LINK = \"\"\"\n{% if record.pk %}\n {{ record.address }}\n{% elif perms.ipam.add_ipaddress %}\n {% if record.0 <= 65536 %}{{ record.0 }}{% else %}Many{% endif %} IP{{ record.0|pluralize }} available\n{% else %}\n {% if record.0 <= 65536 %}{{ record.0 }}{% else %}Many{% endif %} IP{{ record.0|pluralize }} available\n{% endif %}\n\"\"\"\n\nIPADDRESS_ASSIGN_LINK = \"\"\"\n{{ record }}\n\"\"\"\n\nIPADDRESS_PARENT = \"\"\"\n{% if record.interface %}\n {{ record.interface.parent }}\n{% else %}\n —\n{% endif %}\n\"\"\"\n\nVRF_LINK = \"\"\"\n{% if record.vrf %}\n {{ record.vrf }}\n{% elif prefix.vrf %}\n {{ prefix.vrf }}\n{% else %}\n Global\n{% endif %}\n\"\"\"\n\nSTATUS_LABEL = \"\"\"\n{% if record.pk %}\n {{ record.get_status_display }}\n{% else %}\n Available\n{% endif %}\n\"\"\"\n\nVLAN_LINK = \"\"\"\n{% if record.pk %}\n {{ record.vid }}\n{% elif perms.ipam.add_vlan %}\n {{ record.available }} VLAN{{ record.available|pluralize }} available\n{% else %}\n {{ record.available }} VLAN{{ record.available|pluralize }} available\n{% endif %}\n\"\"\"\n\nVLAN_PREFIXES = \"\"\"\n{% for prefix in record.prefixes.all %}\n {{ prefix }}{% if not forloop.last %}
{% endif %}\n{% empty %}\n —\n{% endfor %}\n\"\"\"\n\nVLAN_ROLE_LINK = \"\"\"\n{% if record.role %}\n {{ record.role }}\n{% else %}\n —\n{% endif %}\n\"\"\"\n\nVLANGROUP_ACTIONS = \"\"\"\n\n \n\n{% with next_vid=record.get_next_available_vid %}\n {% if next_vid and perms.ipam.add_vlan %}\n \n \n \n {% endif %}\n{% endwith %}\n{% if perms.ipam.change_vlangroup %}\n \n{% endif %}\n\"\"\"\n\nVLAN_MEMBER_UNTAGGED = \"\"\"\n{% if record.untagged_vlan_id == vlan.pk %}\n \n{% endif %}\n\"\"\"\n\nVLAN_MEMBER_ACTIONS = \"\"\"\n{% if perms.dcim.change_interface %}\n \n{% endif %}\n\"\"\"\n\nTENANT_LINK = \"\"\"\n{% if record.tenant %}\n {{ record.tenant }}\n{% elif record.vrf.tenant %}\n {{ record.vrf.tenant }}*\n{% else %}\n —\n{% endif %}\n\"\"\"\n\n\n#\n# VRFs\n#\n\nclass VRFTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn()\n rd = tables.Column(verbose_name='RD')\n tenant = tables.TemplateColumn(template_code=COL_TENANT)\n\n class Meta(BaseTable.Meta):\n model = VRF\n fields = ('pk', 'name', 'rd', 'tenant', 'description')\n\n\n#\n# RIRs\n#\n\nclass RIRTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn(verbose_name='Name')\n is_private = BooleanColumn(verbose_name='Private')\n aggregate_count = tables.Column(verbose_name='Aggregates')\n actions = tables.TemplateColumn(template_code=RIR_ACTIONS, attrs={'td': {'class': 'text-right noprint'}}, verbose_name='')\n\n class Meta(BaseTable.Meta):\n model = RIR\n fields = ('pk', 'name', 'is_private', 'aggregate_count', 'actions')\n\n\nclass RIRDetailTable(RIRTable):\n stats_total = tables.Column(\n accessor='stats.total',\n verbose_name='Total',\n footer=lambda table: sum(r.stats['total'] for r in table.data)\n )\n stats_active = tables.Column(\n accessor='stats.active',\n verbose_name='Active',\n footer=lambda table: sum(r.stats['active'] for r in table.data)\n )\n stats_reserved = tables.Column(\n accessor='stats.reserved',\n verbose_name='Reserved',\n footer=lambda table: sum(r.stats['reserved'] for r in table.data)\n )\n stats_deprecated = tables.Column(\n accessor='stats.deprecated',\n verbose_name='Deprecated',\n footer=lambda table: sum(r.stats['deprecated'] for r in table.data)\n )\n stats_available = tables.Column(\n accessor='stats.available',\n verbose_name='Available',\n footer=lambda table: sum(r.stats['available'] for r in table.data)\n )\n utilization = tables.TemplateColumn(\n template_code=RIR_UTILIZATION,\n verbose_name='Utilization'\n )\n\n class Meta(RIRTable.Meta):\n fields = (\n 'pk', 'name', 'is_private', 'aggregate_count', 'stats_total', 'stats_active', 'stats_reserved',\n 'stats_deprecated', 'stats_available', 'utilization', 'actions',\n )\n\n\n#\n# Aggregates\n#\n\nclass AggregateTable(BaseTable):\n pk = ToggleColumn()\n prefix = tables.LinkColumn(verbose_name='Aggregate')\n date_added = tables.DateColumn(format=\"Y-m-d\", verbose_name='Added')\n\n class Meta(BaseTable.Meta):\n model = Aggregate\n fields = ('pk', 'prefix', 'rir', 'date_added', 'description')\n\n\nclass AggregateDetailTable(AggregateTable):\n child_count = tables.Column(verbose_name='Prefixes')\n utilization = tables.TemplateColumn(UTILIZATION_GRAPH, orderable=False, verbose_name='Utilization')\n\n class Meta(AggregateTable.Meta):\n fields = ('pk', 'prefix', 'rir', 'child_count', 'utilization', 'date_added', 'description')\n\n\n#\n# Roles\n#\n\nclass RoleTable(BaseTable):\n pk = ToggleColumn()\n prefix_count = tables.TemplateColumn(\n accessor=Accessor('prefixes.count'),\n template_code=ROLE_PREFIX_COUNT,\n orderable=False,\n 
verbose_name='Prefixes'\n )\n vlan_count = tables.TemplateColumn(\n accessor=Accessor('vlans.count'),\n template_code=ROLE_VLAN_COUNT,\n orderable=False,\n verbose_name='VLANs'\n )\n actions = tables.TemplateColumn(template_code=ROLE_ACTIONS, attrs={'td': {'class': 'text-right noprint'}}, verbose_name='')\n\n class Meta(BaseTable.Meta):\n model = Role\n fields = ('pk', 'name', 'prefix_count', 'vlan_count', 'slug', 'actions')\n\n\n#\n# Prefixes\n#\n\nclass PrefixTable(BaseTable):\n pk = ToggleColumn()\n prefix = tables.TemplateColumn(PREFIX_LINK, attrs={'th': {'style': 'padding-left: 17px'}})\n status = tables.TemplateColumn(STATUS_LABEL)\n vrf = tables.TemplateColumn(VRF_LINK, verbose_name='VRF')\n tenant = tables.TemplateColumn(template_code=TENANT_LINK)\n site = tables.LinkColumn('dcim:site', args=[Accessor('site.slug')])\n vlan = tables.LinkColumn('ipam:vlan', args=[Accessor('vlan.pk')], verbose_name='VLAN')\n role = tables.TemplateColumn(PREFIX_ROLE_LINK)\n\n class Meta(BaseTable.Meta):\n model = Prefix\n fields = ('pk', 'prefix', 'status', 'vrf', 'tenant', 'site', 'vlan', 'role', 'description')\n row_attrs = {\n 'class': lambda record: 'success' if not record.pk else '',\n }\n\n\nclass PrefixDetailTable(PrefixTable):\n utilization = tables.TemplateColumn(UTILIZATION_GRAPH, orderable=False)\n tenant = tables.TemplateColumn(template_code=COL_TENANT)\n\n class Meta(PrefixTable.Meta):\n fields = ('pk', 'prefix', 'status', 'vrf', 'utilization', 'tenant', 'site', 'vlan', 'role', 'description')\n\n\n#\n# IPAddresses\n#\n\nclass IPAddressTable(BaseTable):\n pk = ToggleColumn()\n address = tables.TemplateColumn(IPADDRESS_LINK, verbose_name='IP Address')\n vrf = tables.TemplateColumn(VRF_LINK, verbose_name='VRF')\n status = tables.TemplateColumn(STATUS_LABEL)\n tenant = tables.TemplateColumn(template_code=TENANT_LINK)\n parent = tables.TemplateColumn(IPADDRESS_PARENT, orderable=False)\n interface = tables.Column(orderable=False)\n\n class Meta(BaseTable.Meta):\n model = IPAddress\n fields = (\n 'pk', 'address', 'vrf', 'status', 'role', 'tenant', 'parent', 'interface', 'dns_name', 'description',\n )\n row_attrs = {\n 'class': lambda record: 'success' if not isinstance(record, IPAddress) else '',\n }\n\n\nclass IPAddressDetailTable(IPAddressTable):\n nat_inside = tables.LinkColumn(\n 'ipam:ipaddress', args=[Accessor('nat_inside.pk')], orderable=False, verbose_name='NAT (Inside)'\n )\n tenant = tables.TemplateColumn(template_code=COL_TENANT)\n\n class Meta(IPAddressTable.Meta):\n fields = (\n 'pk', 'address', 'vrf', 'status', 'role', 'tenant', 'nat_inside', 'parent', 'interface', 'dns_name',\n 'description',\n )\n\n\nclass IPAddressAssignTable(BaseTable):\n address = tables.TemplateColumn(IPADDRESS_ASSIGN_LINK, verbose_name='IP Address')\n status = tables.TemplateColumn(STATUS_LABEL)\n parent = tables.TemplateColumn(IPADDRESS_PARENT, orderable=False)\n interface = tables.Column(orderable=False)\n\n class Meta(BaseTable.Meta):\n model = IPAddress\n fields = ('address', 'vrf', 'status', 'role', 'tenant', 'parent', 'interface', 'description')\n orderable = False\n\n\nclass InterfaceIPAddressTable(BaseTable):\n \"\"\"\n List IP addresses assigned to a specific Interface.\n \"\"\"\n address = tables.TemplateColumn(IPADDRESS_ASSIGN_LINK, verbose_name='IP Address')\n vrf = tables.TemplateColumn(VRF_LINK, verbose_name='VRF')\n status = tables.TemplateColumn(STATUS_LABEL)\n tenant = tables.TemplateColumn(template_code=TENANT_LINK)\n\n class Meta(BaseTable.Meta):\n model = IPAddress\n fields = ('address', 
'vrf', 'status', 'role', 'tenant', 'description')\n\n\n#\n# VLAN groups\n#\n\nclass VLANGroupTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn(verbose_name='Name')\n site = tables.LinkColumn('dcim:site', args=[Accessor('site.slug')], verbose_name='Site')\n vlan_count = tables.Column(verbose_name='VLANs')\n slug = tables.Column(verbose_name='Slug')\n actions = tables.TemplateColumn(template_code=VLANGROUP_ACTIONS, attrs={'td': {'class': 'text-right noprint'}},\n verbose_name='')\n\n class Meta(BaseTable.Meta):\n model = VLANGroup\n fields = ('pk', 'name', 'site', 'vlan_count', 'slug', 'actions')\n\n\n#\n# VLANs\n#\n\nclass VLANTable(BaseTable):\n pk = ToggleColumn()\n vid = tables.TemplateColumn(VLAN_LINK, verbose_name='ID')\n site = tables.LinkColumn('dcim:site', args=[Accessor('site.slug')])\n group = tables.LinkColumn('ipam:vlangroup_vlans', args=[Accessor('group.pk')], verbose_name='Group')\n tenant = tables.TemplateColumn(template_code=COL_TENANT)\n status = tables.TemplateColumn(STATUS_LABEL)\n role = tables.TemplateColumn(VLAN_ROLE_LINK)\n\n class Meta(BaseTable.Meta):\n model = VLAN\n fields = ('pk', 'vid', 'site', 'group', 'name', 'tenant', 'status', 'role', 'description')\n row_attrs = {\n 'class': lambda record: 'success' if not isinstance(record, VLAN) else '',\n }\n\n\nclass VLANDetailTable(VLANTable):\n prefixes = tables.TemplateColumn(VLAN_PREFIXES, orderable=False, verbose_name='Prefixes')\n tenant = tables.TemplateColumn(template_code=COL_TENANT)\n\n class Meta(VLANTable.Meta):\n fields = ('pk', 'vid', 'site', 'group', 'name', 'prefixes', 'tenant', 'status', 'role', 'description')\n\n\nclass VLANMemberTable(BaseTable):\n parent = tables.LinkColumn(order_by=['device', 'virtual_machine'])\n name = tables.LinkColumn(verbose_name='Interface')\n untagged = tables.TemplateColumn(\n template_code=VLAN_MEMBER_UNTAGGED,\n orderable=False\n )\n actions = tables.TemplateColumn(\n template_code=VLAN_MEMBER_ACTIONS,\n attrs={'td': {'class': 'text-right noprint'}},\n verbose_name=''\n )\n\n class Meta(BaseTable.Meta):\n model = Interface\n fields = ('parent', 'name', 'untagged', 'actions')\n\n\nclass InterfaceVLANTable(BaseTable):\n \"\"\"\n List VLANs assigned to a specific Interface.\n \"\"\"\n vid = tables.LinkColumn('ipam:vlan', args=[Accessor('pk')], verbose_name='ID')\n tagged = BooleanColumn()\n site = tables.LinkColumn('dcim:site', args=[Accessor('site.slug')])\n group = tables.Column(accessor=Accessor('group.name'), verbose_name='Group')\n tenant = tables.TemplateColumn(template_code=COL_TENANT)\n status = tables.TemplateColumn(STATUS_LABEL)\n role = tables.TemplateColumn(VLAN_ROLE_LINK)\n\n class Meta(BaseTable.Meta):\n model = VLAN\n fields = ('vid', 'tagged', 'site', 'group', 'name', 'tenant', 'status', 'role', 'description')\n\n def __init__(self, interface, *args, **kwargs):\n self.interface = interface\n super().__init__(*args, **kwargs)\n\n\n#\n# Services\n#\n\nclass ServiceTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn(\n viewname='ipam:service',\n args=[Accessor('pk')]\n )\n\n class Meta(BaseTable.Meta):\n model = Service\n fields = ('pk', 'name', 'parent', 'protocol', 'port', 'description')\n","repo_name":"mtbutler07/netbox-heroku","sub_path":"netbox/ipam/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":17374,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"1944288083","text":"# Tool to convert a VCF file into a sqlite database\n# utf-8 encoding, user must input 1. human genome assembly (e.g. hg19) for the file, 2. new db name, 3. source vcf file\n# Contains some functions from the UCSC reference beacon implementation\n# Author - Alex Mankovich, alex.mankovich@philips.com, Philips Research North America\n# Modified from ucscBeacon reference implementation available at https://github.com/maximilianh/ucscBeacon\n# Apache 2.0 License\n\nimport sqlite3, gzip, optparse\nfrom os.path import join, isfile, dirname, isdir, basename, sys\n\nfrom joseph.beacon.beacon_api import *\n\n\ndef argParser():\n\t# Parsing command-line arguments\n\t\" parse command line options into args and options \"\n\tparser = optparse.OptionParser (\n\t\t\"\"\"usage: %prog [options] [referenceDb] [datasetName] filename(s) - import VCF, complete genomics or BED files into the beacon database.\"\"\")\n\n\t(options, args) = parser.parse_args ()\n\n\tif len (args) == 0:\n\t\tparser.print_help ()\n\t\tsys.exit (0)\n\treturn args, options\n\n\ndef dbCreateTable(conn, tableName):\n\t\" create an empty table with chrom/pos/allele fields \"\n\tconn.execute (\"DROP TABLE IF EXISTS %s\" % tableName)\n\tconn.commit ()\n\n\t_tableDef = (\n\t\t'CREATE TABLE IF NOT EXISTS %s '\n\t\t'('\n\t\t' chrom text,' # chromosome\n\t\t' pos int,' # start position, 0-based\n\t\t' allele text' # alternate allele, can also be IATG = insertion of ATG or D15 = deletion of 15 bp\n\t\t')')\n\tconn.execute (_tableDef % tableName)\n\tconn.commit ()\n\n\ndef dbFileName(refDB, datasetName):\n\tdbDir = dirname (__file__)\n\tdbName = '{0}.{1}.sqlite'.format (datasetName, refDB)\n\tdbPath = join (dbDir, dbName)\n\treturn dbPath\n\n\ndef dbOpen(refDB, datasetName, mustExist=False):\n\tdbName = dbFileName (refDB, datasetName)\n\tif not isfile (dbName) and mustExist:\n\t\treturn None\n\tconn = sqlite3.Connection (dbName)\n\treturn conn\n\n\ndef dbGetTables(conn):\n\tcursor = conn.cursor ()\n\tcursor.execute (\"SELECT name FROM sqlite_master WHERE type='table';\")\n\trows = cursor.fetchall ()\n\ttables = []\n\tfor row in rows:\n\t\ttables.append (row[0])\n\treturn tables\n\n\ndef dbQuery(conn, query, parameters):\n\tcursor = conn.cursor ()\n\tif params == None:\n\t\tcursor.execute (query)\n\telse:\n\t\tcursor.execute (query, parameters)\n\treturn cursor.fetchall ()\n\n\ndef readAllelesVcf(ifh):\n\trows = []\n\tskipCount = 0\n\temptyCount = 0\n\tfor chunk in ifh.chunks ():\n\t\tfor line in chunk.decode ('utf-8').splitlines ():\n\t\t\tif line.startswith (\"#\"):\n\t\t\t\tcontinue\n\t\t\tfields = str.split (line.rstrip (\"\\n\"), \"\\t\", maxsplit=5)\n\t\t\tchrom, pos, varId, ref, alt = fields[:5]\n\t\t\tif chrom.startswith (\"chr\"):\n\t\t\t\tchrom = chrom.replace (\"chr\", \"\")\n\t\t\tpos = int (pos) - 1\n\t\t\tif alt == \".\":\n\t\t\t\temptyCount += 1\n\t\t\t\tcontinue\n\t\t\trefIsOne = len (ref) == 1\n\t\t\taltIsOne = len (alt) == 1\n\n\t\t\tif refIsOne and altIsOne:\n\t\t\t\tallele = alt\n\t\t\telif not refIsOne and altIsOne:\n\t\t\t\tallele = \"D\" + str (len (ref) - 1)\n\t\t\t\tpos += 1\n\t\t\telif refIsOne and not altIsOne:\n\t\t\t\tallele = \"I\" + alt[1:]\n\t\t\t\tpos += 1\n\t\t\telif not refIsOne and not altIsOne:\n\t\t\t\tskipCount += 1\n\t\t\telse:\n\t\t\t\tprint (\"Invalid VCF fields:\", fields)\n\t\t\t\tsys.exit (1)\n\t\t\tdataRow = (chrom, pos, allele)\n\t\t\trows.append (dataRow)\n\t\tprint (\"Skipped %d lines with empty ALT alleles\" % emptyCount)\n\t\tprint (\"Skipped %d lines with both 
ALT and REF alleles len!=1, cannot encode as queries\" % skipCount)\n\treturn rows\n\n\ndef importFiles(refDB, ifh, datasetName):\n\tconn = dbOpen (refDB, datasetName)\n\tdbCreateTable (conn, datasetName)\n\n\tconn.execute (\"PRAGMA synchronous=OFF\")\n\tconn.execute (\"PRAGMA count_changes=OFF\") # http://blog.quibb.org/2010/08/fast-bulk-inserts-into-sqlite/\n\tconn.execute (\"PRAGMA cache_size=800000\") # http://web.utk.edu/~jplyon/sqlite/SQLite_optimization_FAQ.html\n\tconn.execute (\"PRAGMA journal_mode=OFF\") # http://www.sqlite.org/pragma.html#pragma_journal_mode\n\tconn.execute (\"PRAGMA temp_store=memory\")\n\tconn.commit ()\n\n\n\talleles = readAllelesVcf (ifh)\n\tprint (\"Loading alleles into db %s\" % dbFileName (refDB, datasetName))\n\tsql = \"INSERT INTO %s (chrom, pos, allele) VALUES (?,?,?)\" % datasetName\n\tconn.executemany (sql, alleles)\n\tconn.commit ()\n\n\tprint (\"Indexing db table\")\n\tconn.execute (\"CREATE UNIQUE INDEX '%s_index' ON '%s' ('chrom','pos','allele')\" % \\\n\t (datasetName, datasetName))\n\n\ndef main():\n\targs, options = argParser ()\n\trefDB = args[0]\n\tdatasetName = args[1]\n\tfileName = args[2]\n\tif len (args) < 3:\n\t\tprint (\"Specify refdb, datasetname, and vcf filename\")\n\t\tsys.exit (1)\n\tif refDB not in list_references ():\n\t\tprint (\"The reference assembly '%s' is not valid.\" % refDB)\n\t\tprint (\"Please specify one of these reference assemblies:\")\n\t\tprint (list_references ())\n\t\tsys.exit (1)\n\timportFiles (refDB, fileName, datasetName)\n\n\nif __name__ == \"__main__\":\n\tmain ()\n","repo_name":"josephqin-zz/clinicalTrailsUI","sub_path":"joseph/beacon/beacon_import_data.py","file_name":"beacon_import_data.py","file_ext":"py","file_size_in_byte":4697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"43652756916","text":"from math import *\nG = 6.67430e-11\n\nclass Astre:\n def __init__(self, rayon: float, masse: float, x: int, y: int) -> None:\n self.r = rayon\n self.m = masse\n self.x = x\n self.y = y\n \n","repo_name":"LeGrandCthulhu/Projet-Physique","sub_path":"Classes.py","file_name":"Classes.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"5974416676","text":"# -*- encoding: utf-8 -*-\n#\n\"\"\"\nFirst, install the latest release of Python wrapper: $ pip install ovh\n\"\"\"\nimport json\nimport os # pour récupérer les variables d'env\nimport re # import regex\n\nimport ovh # export ovh api\nfrom decouple import config\n\n# if service expired, you need to exclude them.\nexclude_domains = [\"bimscreen.fr\"]\n\n# Instantiate. Visit https://api.ovh.com/createToken/?GET=/me\n# # to get your credentials\nclient = ovh.Client(\n endpoint=config(\"OVH_ENDPOINT\"),\n application_key=config(\"OVH_APPLICATION_KEY\"),\n application_secret=config(\"OVH_APPLICATION_SECRET\"),\n consumer_key=config(\"OVH_CONSUMER_KEY\"),\n)\n\n# print headers\nprint('\"domain\";\"subdomain\";\"type\";\"record\"')\n\n# Print dns zone for each domain\n\ndomains = client.get(\"/domain/zone/\")\nfor domain in domains:\n if domain not in exclude_domains:\n details = client.get(\"/domain/zone/%s/export\" % domain)\n detailssansovh = re.sub(\".*.ovh.net.*\", \"\", details)\n regex1 = \".*IN.A.*\"\n regex2 = \".*IN.CNAME.*\"\n regexList = [regex1, regex2]\n\n for regex in regexList:\n filtereddetails = re.findall(regex, detailssansovh)\n for finding in filtereddetails:\n tmp = finding.split()\n\n # cas sous domaine vide\n if tmp[0] == \"IN\":\n tmp.insert(0, \"\")\n\n # remove 'IN'\n tmp.remove(\"IN\")\n\n # add domain\n tmp.insert(0, domain)\n\n for i, elem in enumerate(tmp):\n print('\"' + elem + '\"', end=\"\")\n if i != len(tmp) - 1:\n print(\";\", end=\"\")\n print()\n","repo_name":"rdia9/ovh-api","sub_path":"List-DNSZonesInDomains.py","file_name":"List-DNSZonesInDomains.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"3289987486","text":"with open('4713_A.txt') as f:\n n = int(f.readline())\n sp = []\n s = 0\n minn = 9999999999999999999999999999999999999\n for line in f:\n a, b = map(int, line.split())\n b = b // 36 + bool(b % 36)\n sp.append((a, b))\nsp.sort()\nfor i in range(len(sp)):\n s=0\n for j in range(len(sp)):\n s+=abs(sp[i][0]-sp[j][0])*sp[j][1]\n minn=min(s,minn)\nprint(minn)\n","repo_name":"olgaObnosova/EGE","sub_path":"27/4713_kege_perebor.py","file_name":"4713_kege_perebor.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"4014477121","text":"#!/usr/bin/python3\n\nimport os\nimport re\nimport sys\nimport ipaddress\nimport argparse\nfrom subprocess import Popen, PIPE\n\nfrom thirdparty import ping\n\n#############################################################################\n#############################################################################\n\ndef run_cmd(cmd):\n process = Popen(cmd, stdout=PIPE)\n (output, err) = process.communicate()\n exit_code = process.wait()\n return output\n\n#############################################################################\n\ndef scan_all(network):\n ipn = ipaddress.ip_network(network)\n result = ping.multi_ping_query([str(ip) for ip in ipn.hosts()])\n return [h for (h, tm) in result.items() if tm is not None]\n\n#############################################################################\n\ndef traceroute(ip):\n troute = run_cmd([\"traceroute\", \"-w 1\", \"-n\", ip]).decode(\"utf-8\")\n trace = []\n for line in troute.split(\"\\n\"):\n m = re.match(r\"\\s*\\d+\\s+(\\S+)\", line)\n if m:\n host = m.group(1)\n if host != \"*\":\n trace.append(host)\n trace.append(ip)\n return trace\n\n#############################################################################\n\ndef run(args):\n with open(args.output, \"w\") as fw:\n for host in scan_all(args.network[0]):\n print(host)\n tr = traceroute(host)\n for i in range(len(tr) - 2):\n fw.write(\"\\\"%s\\\" -- \\\"%s\\\";\\n\" % (tr[i], tr[i + 1]))\n\n#############################################################################\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-o\", \"--output\", type=str, required=True,\n help=\"output file, e.g. 'subnet1.pairs'\")\n parser.add_argument(\"network\", metavar=\"address\", type=str, nargs=1,\n help=\"network address to scan, e.g. '192.168.0.0/16'\")\n args = parser.parse_args()\n run(args)\n","repo_name":"dsiroky/autonetworkmapper","sub_path":"scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"36054069222","text":"## To run this file, installation of \"requests\" and python3 is required\r\n## The format in which the command line arguments are is URL1, URL 2, Filename\r\n## Ex, filepath\\.. py example.py http:URL1 http:URL2 example(csvfile) \r\n\r\nimport requests\t\r\nimport time\r\nimport sys\r\nimport csv\r\n\r\n#CONSTANTS\r\nDELAY = 1\r\nFEILD_NAME = 'value'\r\nHEAD_1 = \"timestamp\"\r\nHEAD_2 = \"Power\"\r\nHEAD_3 = \"Current\"\r\n\r\n#Argument validation and assigning URL\r\nif (sys.argv[1]).startswith('http'):\r\n URL = (sys.argv[1])\r\nelse:\r\n print(\"Please add URL for argument 1\")\r\n\r\nif (sys.argv[2]).startswith('http'):\r\n URL2 = (sys.argv[2])\r\nelse:\r\n print(\"Please add URL for argument 2\")\r\n\r\n#Get request \r\nsess = requests.session()\r\n\r\n#Writing and saving data into a csv\r\nif len(sys.argv) == 3:\r\n f = open('default.csv','w ') \r\nelse:\r\n f = open(sys.argv[3] + \".csv\",'a')\r\n \r\nwriter = csv.DictWriter(f, fieldnames=[HEAD_1, HEAD_2, HEAD_3])\r\nwriter.writeheader()\r\n\r\n#Loop to get data from URL and print into terminal and csv \r\nwhile True:\r\n\r\n req = sess.get(URL)\r\n json_data = requests.get(URL).json()\r\n result = json_data[FEILD_NAME]\r\n \r\n req = sess.get(URL2)\r\n json_data = requests.get(URL2).json()\r\n result2 = json_data[FEILD_NAME]\r\n \r\n now = int( time.time() )\r\n print(now,\".....\", result, result2)\r\n\r\n time.sleep(DELAY)\r\n \r\n f.writelines(str(now) +\",\" + str(result) + \",\" + str(result2)) \r\n f.write('\\n')\r\n\r\n","repo_name":"Arshadfaz20/Research-Project-","sub_path":"data_reading.py","file_name":"data_reading.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"15776773807","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('', views.loginPage, name=\"login\"),\n path('logout/', views.logoutUser, name=\"logout\"),\n path('register/', views.register, name=\"register\"),\n path('flux/', views.flux, name='flux'),\n path('abo/', views.abo, name='abo'),\n path('create-ticket/', views.create_ticket, name='ticket'),\n path('create-critique/', views.create_critique, name='critique'),\n path('answer-ticket//', views.answer_ticket, name='answer'),\n path('own-posts/', views.own_posts, name='own'),\n path('update-own-critique/', views.update_own_critique, name='update-own-critique'),\n path('update-own-ticket//', views.update_own_ticket, name='update-own-ticket'),\n path('delete//', views.delete, name='delete'),\n]\n","repo_name":"RafaRemote/oc_python_p9_develop_django_webapp","sub_path":"base/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"4550669532","text":"import sys\nimport sqlite3\nimport os\nimport time\nimport datetime\nimport pandas as pd\n\n# args = sys.argv\n# if len(args) > 1:\n# category = args[1]\n# else:\n# category = None\n\n# Connect to database\npath_to_db = \"../../../../../../../../WebScraping/clubs.db\"\n\nconn = sqlite3.connect(path_to_db)\nc = conn.cursor()\n\nall_posts = pd.read_sql_query(\"SELECT * FROM media3\", conn)\n\n# Filter posts to past 5 days\ntoday = datetime.date.today()\nfive_days_ago = today - datetime.timedelta(days=5)\nfive_days_ago = pd.to_datetime(five_days_ago)\n\n# Convert five_days_ago to epoch timestamp\nfive_days_ago_epoch = int(five_days_ago.timestamp())\n\nall_posts[\"post_date_epoch\"] = pd.to_datetime(all_posts[\"post_date\"]).apply(\n lambda x: int(x.timestamp())\n)\n\n\nrecent_posts = all_posts[all_posts['post_date_epoch'] > five_days_ago_epoch]\nclubs = pd.read_sql_query(\"SELECT * FROM clubs\", conn)\n\nto_json = pd.DataFrame(index=recent_posts.index)\nto_json['image'] = recent_posts['media_url']\nto_json['date'] = recent_posts['post_date']\nto_json['caption'] = recent_posts.get('caption', [None] * len(recent_posts))\nto_json['clubName'] = [None] * len(recent_posts)\nto_json['twitter'] = [None] * len(recent_posts)\nto_json['instagram'] = [None] * len(recent_posts)\nto_json['facebook'] = [None] * len(recent_posts)\nto_json['clubDescription'] = [None] * len(recent_posts)\nto_json['category'] = [None] * len(recent_posts)\n\n\nfor index, row in recent_posts.iterrows():\n club_name = row['club_name']\n club_info = clubs[clubs['club_name'] == club_name].iloc[0] if not clubs[clubs['club_name'] == club_name].empty else None\n if club_info is not None:\n to_json.at[index, 'clubName'] = club_info['club_name']\n to_json.at[index, 'twitter'] = club_info.get('twitterUrl')\n to_json.at[index, 'instagram'] = club_info.get('instagramUrl')\n to_json.at[index, 'facebook'] = club_info.get('facebookUrl')\n to_json.at[index, 'clubDescription'] = club_info.get('description')\n\n\nclub_categories = pd.read_sql_query(\"SELECT * FROM club_categories\", conn)\n\n# category_mapping = pd.read_sql_query(\"SELECT * FROM categories\", conn)\n# # dict where key is category_name, and id is category_id\n# category_mapping = dict(zip(category_mapping['category_name'], category_mapping['category_id']))\n\n# # Filter by category if specified\n# if category:\n# for index, row in to_json.iterrows():\n# club_name = row['clubName']\n# club_id = clubs[clubs['club_name'] == club_name].iloc[0]['club_id']\n# club_cat_ids = club_categories[club_categories['club_id'] == club_id]\n# if category_mapping[category] not in club_cat_ids['category_id'].values:\n# to_json.drop(index, inplace=True)\n\n# print(to_json)\n# Add categories to json\n\nid_to_category = pd.read_sql_query(\"SELECT * FROM categories\", conn)\nid_to_category = dict(zip(id_to_category['category_id'], id_to_category['category_name']))\n\nfor index, row in to_json.iterrows():\n club_name = row['clubName']\n club_id = clubs[clubs['club_name'] == club_name].iloc[0]['club_id']\n club_category = club_categories[club_categories['club_id'] == club_id]['category_id'].values\n club_category = [id_to_category[cat_id] for cat_id in club_category]\n club_category = ':'.join(club_category)\n to_json.at[index, 'category'] = club_category\n\n# Write df to json file\nto_json.to_json(\"posts.json\", 
orient=\"records\")","repo_name":"srpatil24/MadHacks","sub_path":"BackEnd/src/main/java/com/example/backend/posts/get_json.py","file_name":"get_json.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"29205878065","text":"#!/usr/bin/python3\ndef list_division(my_list_1, my_list_2, list_length):\n new_list = []\n for a in range(list_length):\n try:\n rslt = my_list_1[a]/my_list_2[a]\n except TypeError:\n print(\"wrong type\")\n rslt = 0\n except ZeroDivisionError:\n print(\"division by 0\")\n rslt = 0\n except IndexError:\n print(\"out of range\")\n rslt= 0\n finally:\n new_list.append(rslt)\n return (new_list)\n","repo_name":"Hyake/alx-higher_level_programming","sub_path":"0x05-python-exceptions/4-list_division.py","file_name":"4-list_division.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"35074285230","text":"from flask import Flask, render_template, request, send_file\r\nfrom flask_mysqldb import MySQL\r\nimport io\r\nimport pandas as pd\r\nimport xlsxwriter\r\nfrom datetime import date\r\n\r\ntoday = date.today()\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n# MySQL configurations\r\napp.config['MYSQL_USER'] = 'root'\r\napp.config['MYSQL_PASSWORD'] = '22EC1U3Inisl#wiswAPe'\r\napp.config['MYSQL_DB'] = 'emsdb'\r\napp.config['MYSQL_HOST'] = 'localhost'\r\napp.config['MYSQL_CURSORCLASS'] = 'DictCursor'\r\n\r\n\r\nmysql = MySQL(app)\r\n\r\n\r\n@app.route(\"/\")\r\ndef index():\r\n return render_template(\"index.html\")\r\n\r\n\r\n@app.route(\"/welcome\", methods=[\"GET\", \"POST\"])\r\ndef welcome():\r\n cur = mysql.connection.cursor()\r\n cur.execute('''call ac_det''')\r\n acc_det = cur.fetchall()\r\n cur.close()\r\n if request.method == 'POST':\r\n username = request.form.get(\"user\")\r\n password = request.form.get(\"pass\")\r\n\r\n cur = mysql.connection.cursor()\r\n cur.callproc('staff_timetable', [int(username)])\r\n det = cur.fetchall()\r\n cur.close()\r\n\r\n x = len(det)\r\n value = []\r\n for i in acc_det:\r\n if (i['staff_id'] == int(username) and i['staff_pass'] == password):\r\n cur = mysql.connection.cursor()\r\n cur.callproc('staff_details', [int(username)])\r\n staff = cur.fetchall()\r\n cur.close()\r\n # age = staff[0]['age']\r\n # dept = staff[0]['dept_name']\r\n name = i['staff_name']\r\n # st_id = staff[0]['staff_id']\r\n # desg = staff[0]['designation']\r\n key = det[1].keys()\r\n for j in range(0, x):\r\n row = []\r\n for k in key:\r\n row.append(det[j][k])\r\n value.append(row)\r\n return render_template(\"welcome.html\", name=name)\r\n # return render_template(\"table.html\", name=name, key=key, value=value, dept=dept, id=st_id, desg=desg, age=age)\r\n return \"login failed!!\"\r\n\r\n\r\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\r\ndef login():\r\n\r\n return render_template(\"Login.html\")\r\n\r\n\r\n@app.route(\"/contact\", methods=[\"GET\", \"POST\"])\r\ndef contact():\r\n name = request.form.get(\"name\")\r\n mail = request.form.get(\"mail\")\r\n message = request.form.get(\"message\")\r\n cur = mysql.connection.cursor()\r\n cur.callproc('insert_contact', [name, mail, message])\r\n cur.close()\r\n return render_template(\"contact.html\", name=name, message=message, mail=mail)\r\n\r\n\r\n@app.route(\"/details\", methods=[\"GET\", \"POST\"])\r\ndef details():\r\n day = request.form.get(\"day\")\r\n start = int(request.form.get(\"start\"))\r\n end = int(request.form.get(\"end\"))\r\n hall = int(request.form.get(\"hall\"))\r\n # file = request.files['file']\r\n # file.save(file.filename)\r\n # Parse the data as a Pandas DataFrame type\r\n # data = ((pd.read_excel(file))['Halls']).tolist()\r\n # print(data)\r\n if start < end:\r\n cur = mysql.connection.cursor()\r\n cur.callproc('final_list', [day])\r\n staff = cur.fetchall()\r\n print(staff)\r\n x = len(staff)\r\n cur.close()\r\n key = staff[1].keys()\r\n value = []\r\n for j in range(0, x):\r\n row = []\r\n for k in key:\r\n row.append(staff[j][k])\r\n value.append(row)\r\n print(value)\r\n count = 0\r\n final_list = [['staff_id', 'staff_name','designation', 'dept_name', 'age']]\r\n for i in value:\r\n flag = 0\r\n for j in range((4+start), ((4+end)+1)):\r\n print(i[j])\r\n if (i[j] != None):\r\n flag = 1\r\n break\r\n if(count >= hall):\r\n break\r\n if (flag == 0):\r\n final_list.append(i[0:5])\r\n count = count+1\r\n try:\r\n df = pd.DataFrame(final_list[1:], 
columns=final_list[0])\r\n print(df)\r\n output = io.BytesIO()\r\n writer = pd.ExcelWriter(output, engine='xlsxwriter')\r\n df.to_excel(writer, sheet_name='Sheet1')\r\n writer.save()\r\n output.seek(0)\r\n return send_file(output, mimetype='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', as_attachment=True, download_name='data.xlsx')\r\n except IndexError:\r\n return \"NO INVIGILATORS FOUND!!!\"\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n\r\n\r\n'''@app.route(\"/greet\", methods=[\"POST\"])\r\ndef greet():\r\n name = request.form.get(\"name\", \"world\")\r\n if name == \"shoaib\":\r\n return render_template(\"greet.html\", name=\"chutiye\")\r\n else:\r\n return render_template(\"greet.html\", name=name)'''\r\n","repo_name":"mesh05/flask","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4649,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"10031324037","text":"import os, sys\n\nreader, writer = os.pipe()\npid = os.fork()\n\nif pid == 0:\n # Child schreibt nur; lesende Seite schließen\n os.close(reader)\n # Schreibende Seite tatsächlich zum Schreiben öffnen\n writer = os.fdopen(writer, 'w')\n for i in range(1, 101):\n print(f\" Child ist bei: {i}\")\n writer.write(str(i) + \"\\n\")\n sys.exit(0)\nelse:\n # Parent liest nur; schreibende Seite schließen\n os.close(writer)\n # Lesende Seite tatsächlich zum Lesen öffnen\n reader = os.fdopen(reader, 'r')\n # Endlosschleife zum Lesen\n while True:\n line = reader.readline()\n # Ende, wenn keine Daten mehr kommen\n if not line:\n sys.exit(0)\n i = int(line)\n print(f\"Parent berechnet Quadrat von {i}: {i ** 2}\")\n","repo_name":"SaschaKersken/ITHandbuch10","sub_path":"listings/chapter09/pipe.py","file_name":"pipe.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"de","doc_type":"code","stars":20,"dataset":"github-code","pt":"72"}
+{"seq_id":"43588925906","text":"#!/bin/python3\n\nimport os\n\nimport json\nimport time\nimport paramiko\n\n\nimport pandas as pd\nfrom tabulate import tabulate\n\nfrom fabrictestbed.util.constants import Constants\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom fabrictestbed.slice_editor import (\n ExperimentTopology,\n Capacities\n)\n\nfrom ipaddress import IPv4Address, IPv6Address, IPv4Network, IPv6Network\n\n\nclass Interface_Custom():\n def place_holder():\n pass\n \n def add_ip(self, ip=None):\n network = self.get_network()\n iface_userdata = self.get_userdata()\n network_userdata = network.get_userdata()\n \n ip = network.allocate_ip()\n \n iface_userdata['ip'] = str(ip)\n iface_userdata['subnet'] = str(network_userdata['subnet'])\n \n subnet_cidr = iface_userdata['subnet'].split('/')[1]\n dev = self.get_device_name()\n gateway = network_userdata['gateway']\n #self.get_node().execute(f'sudo nmcli connection mod {dev} ipv4.method manual ip4 {iface_userdata[\"ip\"]}/{subnet_cidr} ;'\n # f'sudo nmcli connection mod {dev} +ipv4.routes \"10.128.0.0/10 {gateway}\" ;'\n # f'sudo nmcli con down {dev} ;'\n # f'sudo nmcli con up {dev} ;', quiet=True)\n \n self.get_node().execute(f'sudo nmcli connection mod {dev} ipv4.method manual ip4 {iface_userdata[\"ip\"]}/{subnet_cidr} ;' \n f'sudo nmcli con down {dev} ;'\n f'sudo nmcli con up {dev} ;', quiet=True)\n \n return ip\n \n \n def get_ip(self):\n return list(filter(lambda x: x['name'] == self.get_name(), self.get_slice().userdata['interfaces']))[0]['ip']\n\n \n def init_userdata(self):\n iface_userdata = self.get_userdata()\n \n iface_userdata['dev'] = self.get_device_name()\n\n \n def init_for_network_manager(self):\n dev = self.get_device_name()\n self.get_node().execute(f'sudo nmcli device set {dev} managed yes', quiet=True)\n self.get_node().execute(f'sudo nmcli connection add con-name {dev} autoconnect yes save yes ipv6.method disabled ipv4.method disabled ifname {dev} type ethernet', quiet=True)\n\n \n \n def get_userdata(self):\n return list(filter(lambda x: x['name'] == self.get_name(), self.get_slice().userdata['interfaces']))[0]\n \n \n def get_device_name(self) -> str:\n \"\"\"\n Gets a name of the device name on the node\n\n If the interface requires a FABRIC VLAN tag, the interface name returned\n will be the VLAN tagged interface name.\n\n :return: OS interface name\n :rtype: String\n \"\"\"\n try:\n iface_userdata = self.get_userdata()\n \n if 'dev' in iface_userdata:\n return iface_userdata['dev']\n else:\n # logging.debug(f\"iface: {self}\")\n os_iface = self.get_physical_os_interface_name()\n vlan = self.get_vlan()\n\n if vlan is not None:\n os_iface = f\"{os_iface}.{vlan}\"\n \n iface_userdata['dev'] = os_iface\n except:\n os_iface = None\n\n return os_iface\n \n# Add methods to FABlib Classes\nfrom fabrictestbed_extensions.fablib.interface import Interface\n\n\n#fablib.Interface\n#setattr(Interface, 'get_userdata', Interface_Custom.get_userdata)\n#setattr(Interface, 'add_ip', Interface_Custom.add_ip)\n#setattr(Interface, 'init_for_network_manager', Interface_Custom.init_for_network_manager)\n#setattr(Interface, 'get_ip', Interface_Custom.get_ip)\n#setattr(Interface, 'init_userdata', Interface_Custom.init_userdata)\n#setattr(Interface, 'get_device_name', 
Interface_Custom.get_device_name)\n","repo_name":"fabric-testbed/jupyter-examples","sub_path":"fabric_examples/public_demos/SC22/fablib_local/fablib_custom/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":3829,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"}
+{"seq_id":"25142057411","text":"### Mini-projet\n# Vous devez calculer les points gagnés par une equipe de football\n# L'equipe a remporté 18 jeux et a terminé 7 jeux comme match nul\n# Une victoire apporte 3 points, tandis qu'un match null apporte 1 point\n### Crez un prpogramme pour calculer et produire le total des points gagnés par l'equipe\n\nmatch_nul = 7\nmatch_gagner = 18\ntotal_match = match_gagner + match_nul\n\npoint_gagner = match_gagner * 3\npoint_egaliter = match_nul * 1\n\ntotal_point = point_gagner + point_egaliter\n\nprint(total_point)","repo_name":"mrfinker/Python","sub_path":"Projects/Apps/Learing/Debutant/1_Basics/2_2_Mini_projet.py","file_name":"2_2_Mini_projet.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"9444255655","text":"import face_recognition\nimport cv2\nimport numpy as np\nimport json\nimport requests\nfrom threading import Thread, Lock\nfrom queue import Queue\nfrom utils import *\nimport time\n\nwith open(\"config.json\") as config_file:\n config = json.load(config_file)\n\nENDPOINT_URL = config[\"ENDPOINT_URL\"]\nEVENT_ID = config[\"EVENT_ID\"]\nWINDOW_HEIGHT = config[\"WINDOW_HEIGHT\"]\nWINDOW_WIDTH = config[\"WINDOW_WIDTH\"]\nGUI_OVERLAY_PATH = config[\"GUI_OVERLAY_PATH\"]\n\nGUI_OVERLAY = cv2.imread(GUI_OVERLAY_PATH)\nGUI_OVERLAY = cv2.resize(GUI_OVERLAY, (WINDOW_WIDTH, WINDOW_HEIGHT))\n\nvideo_capture = cv2.VideoCapture(0)\ncv2.namedWindow(\"BLinkIOT\", cv2.WINDOW_GUI_NORMAL)\ncv2.setWindowProperty(\"BLinkIOT\", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN);\n\nface_encoding_dict = {}\nevent_attendance_dict = {}\n\nknown_face_encodings = []\nknown_face_names = []\n\nface_locations = []\nface_encodings = []\nface_names = []\nis_running = True\nlast_update_time = None\nlock = Lock()\n\nfound_users_queue = Queue()\n\nprint(\"event_attendance_dict\",event_attendance_dict)\n\n_, frame = video_capture.read() \ndetected_users = []\nlast_detected_time = time.time()\n\ndef get_video_capture():\n global frame\n global lock\n global video_capture\n\n while True: \n lock.acquire() \n _, frame = video_capture.read()\n lock.release()\n\n if not is_running:\n break\n\ndef update_detected_users():\n global frame\n global detected_users\n global face_encoding\n global last_detected_time\n global lock\n\n while True:\n if time.time() - last_detected_time > 1:\n fh, fw, _ = frame.shape\n small_frame = frame\n small_frame = cv2.resize(small_frame, (0, 0), fx=0.25, fy=0.25)\n\n rgb_small_frame = small_frame[:, :, ::-1]\n\n face_locations = face_recognition.face_locations(rgb_small_frame)\n face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)\n\n face_names = []\n\n for face_encoding in face_encodings:\n matches = face_recognition.compare_faces(known_face_encodings, face_encoding)\n face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)\n\n if (np.amin(face_distances) < 0.4):\n best_match_index = np.argmin(face_distances)\n if matches[best_match_index]:\n name = known_face_names[best_match_index]\n face_names.append(name)\n \n detected_users = face_names\n last_detected_time = time.time()\n\n if not is_running:\n break\n\ndef get_encodings_attendance():\n global face_encoding_dict\n global event_attendance_dict\n global known_face_encodings\n global known_face_names\n global last_update_time\n\n face_encoding_dict = load_face_encodings()\n print(\"face_encoding_dict updated!\", face_encoding_dict.keys())\n event_attendance_dict = load_attendance()\n known_face_encodings = []\n known_face_names = []\n\n for k, v in event_attendance_dict.items():\n username = v[\"username\"]\n if username in face_encoding_dict:\n known_face_encodings.append(face_encoding_dict[username])\n known_face_names.append(username)\n\n print(\"event_attendance_dict updated! 
\", event_attendance_dict)\n last_update_time = time.time()\n\ndef update_encodings_attendance():\n while True:\n if time.time() - last_update_time > 10:\n lock.acquire()\n get_encodings_attendance()\n lock.release()\n if not is_running:\n break\n\ndef mark_attendance_users():\n while True:\n if not found_users_queue.empty():\n user = found_users_queue.get()\n if event_attendance_dict[user][\"attended\"] == False:\n print(\"found user:\", user)\n res = requests.post(ENDPOINT_URL+\"/markAttendanceForEvent\", data={\"username\": user, \"event_id\": EVENT_ID})\n if res.status_code == 200 and res.json()[\"status\"] == \"SUCCESS\":\n print(\"Marked attendance for\", user)\n event_attendance_dict[user][\"attended\"] = True\n else:\n print(\"ERROR while marking attendance: \", res.json())\n if not is_running:\n break\n\nget_encodings_attendance()\n\nmark_attendance_thread = Thread(target=mark_attendance_users)\nmark_attendance_thread.start()\nupdate_encodings_attendance_thread = Thread(target=update_encodings_attendance)\nupdate_encodings_attendance_thread.start()\n\nget_video_capture_thread = Thread(target=get_video_capture)\nget_video_capture_thread.start()\nupdate_detected_users_thread = Thread(target=update_detected_users)\nupdate_detected_users_thread.start()\n\nwhile True:\n display_frame = frame\n display_frame = cv2.resize(display_frame, (0, 0), fx=1.5, fy=1.5)\n fh, fw, _ = display_frame.shape\n \n for name in detected_users:\n if name in event_attendance_dict \\\n and event_attendance_dict[name][\"registered\"] == True \\\n and event_attendance_dict[name][\"attended\"] == False:\n cv2.rectangle(display_frame, (0, fh-100), (fw, fh), (209, 136,2), cv2.FILLED)\n font = cv2.FONT_HERSHEY_DUPLEX\n displayname = event_attendance_dict[name][\"displayname\"]\n cv2.putText(display_frame, \"Hello \" + displayname + \"!\", (10, fh-60), font, 1.2, (255, 255, 255), 2)\n cv2.putText(display_frame, \"We're checking you in...\", (10, fh-20), font, 1.0, (255, 255, 255), 1)\n found_users_queue.put(name)\n break\n\n elif name in event_attendance_dict \\\n and event_attendance_dict[name][\"registered\"] == True \\\n and event_attendance_dict[name][\"attended\"] == True:\n cv2.rectangle(display_frame, (0, fh-100), (fw, fh), (60, 142, 56), cv2.FILLED)\n font = cv2.FONT_HERSHEY_DUPLEX\n displayname = event_attendance_dict[name][\"displayname\"]\n cv2.putText(display_frame, \"Welcome \" + displayname + \"!\", (10, fh-60), font, 1.2, (255, 255, 255), 2)\n cv2.putText(display_frame, \"You're checked in.\", (10, fh-20), font, 1.0, (255, 255, 255), 1)\n break\n\n elif name in event_attendance_dict \\\n and event_attendance_dict[name][\"registered\"] == False:\n cv2.rectangle(display_frame, (0, fh-100), (fw, fh), (28,28,183), cv2.FILLED)\n font = cv2.FONT_HERSHEY_DUPLEX\n displayname = event_attendance_dict[name][\"displayname\"]\n cv2.putText(display_frame, \"Hello \" + displayname + \"!\", (10, fh-60), font, 1.2, (255, 255, 255), 2)\n cv2.putText(display_frame, \"It seems like you're not registered\", (10, fh-20), font, 1.0, (255, 255, 255), 1)\n break\n \n \n display_gui = GUI_OVERLAY\n \n x_margin = int((WINDOW_WIDTH - fw) / 2) \n y_margin = int((WINDOW_HEIGHT - fh) / 2)\n\n display_gui[75: 75 + fh ,x_margin: -x_margin,:] = display_frame\n\n cv2.imshow('BLinkIOT', display_gui)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n is_running = False\n 
break\n\nsave_attendance(event_attendance_dict)\nvideo_capture.release()\ncv2.destroyAllWindows()","repo_name":"elliotmoose/BLinkIoT","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"9446948258","text":"class Solution:\n def findMin(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n pivot = len(nums)-1\n for i in range(len(nums)-1):\n \tif nums[i] > nums[i+1]:\n \t\tpivot = i\n return nums[(pivot+1)%len(nums)]","repo_name":"Ting007/leetcodePractice","sub_path":"153MiniInArray.py","file_name":"153MiniInArray.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"31696990047","text":"primeiroTermo = int(input('Vamos fazer uma PA.'\n '\\nDigite o primeiro termo dessa PA: '))\nrazão = int(input('Digite a razão para essa PA: '))\nprint('Os 10 primeiros termos da PA são:')\ntermo = primeiroTermo\nc = 0\nwhile c < 10:\n print('{} -> '.format(termo), end='')\n c += 1\n termo += razão\nprint('FIM!')\n","repo_name":"henrique-sk/Python-Curso-em-Video","sub_path":"Mundo01-03/Exercicios/ex061.py","file_name":"ex061.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"5056881887","text":"\"\"\"\r\n This module provides pre-defined selectors for evolutionary computations.\r\n\r\n All selector functions have the following arguments:\r\n \r\n - *random* -- the random number generator object\r\n - *population* -- the population of individuals\r\n - *args* -- a dictionary of keyword arguments\r\n \r\n Each selector function returns the list of selected individuals.\r\n \r\n .. Copyright (C) 2009 Inspired Intelligence Initiative\r\n\r\n .. This program is free software: you can redistribute it and/or modify\r\n it under the terms of the GNU General Public License as published by\r\n the Free Software Foundation, either version 3 of the License, or\r\n (at your option) any later version.\r\n\r\n .. This program is distributed in the hope that it will be useful,\r\n but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n GNU General Public License for more details.\r\n\r\n .. You should have received a copy of the GNU General Public License\r\n along with this program. If not, see .\r\n\"\"\"\r\n\r\n\r\ndef default_selection(random, population, args):\r\n \"\"\"Return the population.\r\n \r\n This function acts as a default selection scheme for an EC.\r\n It simply returns the entire population as having been \r\n selected.\r\n \r\n .. Arguments:\r\n random -- the random number generator object\r\n population -- the population of individuals\r\n args -- a dictionary of keyword arguments\r\n \r\n \"\"\"\r\n return population\r\n\r\n\r\ndef truncation_selection(random, population, args):\r\n \"\"\"Selects the best individuals from the population.\r\n \r\n This function performs truncation selection, which means that only\r\n the best individuals from the current population are selected. This\r\n is a completely deterministic selection mechanism.\r\n \r\n .. Arguments:\r\n random -- the random number generator object\r\n population -- the population of individuals\r\n args -- a dictionary of keyword arguments\r\n\r\n Optional keyword arguments in args:\r\n \r\n *num_selected* -- the number of individuals to be selected \r\n (default len(population))\r\n \r\n \"\"\"\r\n num_selected = args.setdefault('num_selected', len(population))\r\n pool = list(population)\r\n pool.sort(reverse=True)\r\n return pool[:num_selected]\r\n\r\n \r\ndef uniform_selection(random, population, args):\r\n \"\"\"Return a uniform sampling of individuals from the population.\r\n \r\n This function performs uniform selection by randomly choosing\r\n members of the population with replacement.\r\n \r\n .. Arguments:\r\n random -- the random number generator object\r\n population -- the population of individuals\r\n args -- a dictionary of keyword arguments\r\n\r\n Optional keyword arguments in args:\r\n \r\n *num_selected* -- the number of individuals to be selected \r\n (default 1)\r\n \r\n \"\"\"\r\n num_selected = args.setdefault('num_selected', 1)\r\n pop = list(population)\r\n selected = []\r\n for _ in range(num_selected):\r\n selected.append(pop[random.randint(0, len(pop)-1)])\r\n return selected\r\n\r\n\r\ndef fitness_proportionate_selection(random, population, args):\r\n \"\"\"Return fitness proportionate sampling of individuals from the population.\r\n\r\n .. 
Arguments:\r\n       random -- the random number generator object\r\n       population -- the population of individuals\r\n       args -- a dictionary of keyword arguments\r\n\r\n    Optional keyword arguments in args:\r\n    \r\n    *num_selected* -- the number of individuals to be selected (default 1)\r\n    \r\n    \"\"\"\r\n    num_selected = args.setdefault('num_selected', 1)\r\n    pop = list(population)\r\n    len_pop = len(pop)\r\n    psum = [i for i in range(len_pop)]\r\n    pop_max_fit = (max(pop)).fitness\r\n    pop_min_fit = (min(pop)).fitness\r\n    \r\n    # If we're actually doing minimization,\r\n    # fitness proportionate selection is not defined.\r\n    if pop_max_fit < pop_min_fit:\r\n        raise ValueError('Fitness proportionate selection is not valid for minimization.')\r\n    \r\n    # Set up the roulette wheel\r\n    if pop_max_fit == pop_min_fit:\r\n        psum = [(index + 1) / float(len_pop) for index in range(len_pop)]\r\n    elif (pop_max_fit > 0 and pop_min_fit >= 0) or (pop_max_fit <= 0 and pop_min_fit < 0):\r\n        pop.sort(reverse=True)\r\n        psum[0] = pop[0].fitness\r\n        for i in range(1, len_pop):\r\n            psum[i] = pop[i].fitness + psum[i-1]\r\n        for i in range(len_pop):\r\n            psum[i] /= float(psum[len_pop-1])\r\n    \r\n    # Select the individuals\r\n    selected = []\r\n    for _ in range(num_selected):\r\n        cutoff = random.random()\r\n        lower = 0\r\n        upper = len_pop - 1\r\n        while(upper >= lower):\r\n            mid = (lower + upper) // 2\r\n            if psum[mid] > cutoff:\r\n                upper = mid - 1\r\n            else:\r\n                lower = mid + 1\r\n        lower = max(0, min(len_pop-1, lower))\r\n        selected.append(pop[lower])\r\n    return selected\r\n\r\n\r\ndef rank_selection(random, population, args):\r\n    \"\"\"Return a rank-based sampling of individuals from the population.\r\n    \r\n    .. Arguments:\r\n       random -- the random number generator object\r\n       population -- the population of individuals\r\n       args -- a dictionary of keyword arguments\r\n\r\n    Optional keyword arguments in args:\r\n    \r\n    *num_selected* -- the number of individuals to be selected (default 1)\r\n    \r\n    \"\"\"\r\n    num_selected = args.setdefault('num_selected', 1)\r\n\r\n    # Set up the roulette wheel\r\n    pop = list(population)\r\n    len_pop = len(pop)\r\n    pop.sort()\r\n    psum = list(range(len_pop))\r\n    den = (len_pop * (len_pop + 1)) / 2.0\r\n    for i in range(len_pop):\r\n        psum[i] = (i + 1) / den\r\n    for i in range(1, len_pop):\r\n        psum[i] += psum[i-1]\r\n    \r\n    # Select the individuals\r\n    selected = []\r\n    for _ in range(num_selected):\r\n        cutoff = random.random()\r\n        lower = 0\r\n        upper = len_pop - 1\r\n        while(upper >= lower):\r\n            mid = (lower + upper) // 2\r\n            if psum[mid] > cutoff:\r\n                upper = mid - 1\r\n            else:\r\n                lower = mid + 1\r\n        lower = max(0, min(len_pop-1, lower))\r\n        selected.append(pop[lower])\r\n    return selected\r\n\r\n\r\ndef tournament_selection(random, population, args):\r\n    \"\"\"Return a tournament sampling of individuals from the population.\r\n    \r\n    .. Arguments:\r\n       random -- the random number generator object\r\n       population -- the population of individuals\r\n       args -- a dictionary of keyword arguments\r\n\r\n    Optional keyword arguments in args:\r\n    \r\n    - *num_selected* -- the number of individuals to be selected (default 1)\r\n    - *tourn_size* -- the tournament size (default 2)\r\n    \r\n    \"\"\"\r\n    num_selected = args.setdefault('num_selected', 1)\r\n    tourn_size = args.setdefault('tourn_size', 2)\r\n    pop = list(population)\r\n    selected = []\r\n    for _ in range(num_selected):\r\n        tourn = random.sample(pop, tourn_size)\r\n        selected.append(max(tourn))\r\n    return selected\r\n\r\n\r\n","repo_name":"thanos/ecspy","sub_path":"ecspy/selectors.py","file_name":"selectors.py","file_ext":"py","file_size_in_byte":7177,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"}
+{"seq_id":"8246697335","text":"\"\"\"Manage Telegraf agent.\"\"\"\n\n\nimport sys\n\nfrom . import message\nfrom . import system\n\n\nCONF_PATH = \"/etc/telegraf/telegraf.conf\"\n\n\ndef get_install_agent_cmd():\n \"\"\"Get OS specific command to install Telegraf agent.\"\"\"\n agent_pkg_deb = \"https://packagecloud.io/install/repositories/\" \\\n \"wavefront/telegraf/script.deb.sh\"\n agent_pkg_rpm = \"https://packagecloud.io/install/repositories/\" \\\n \"wavefront/telegraf/script.rpm.sh\"\n dist = system.check_os()\n cmd = None\n if not dist:\n print(\"Error: Unsupported OS version. Please contact\"\n \" support@wavefront.com.\")\n return cmd\n\n if dist.strip().startswith((\"Oracle Linux Server\", \"Fedora\",\n \"Amazon Linux\", \"CentOS\",\n \"Red Hat Enterprise Linux\")):\n cmd = \"curl -s %s | bash\" % (agent_pkg_rpm)\n cmd += \" && yum -y -q install telegraf\"\n elif dist.strip().startswith(\"Ubuntu\"):\n cmd = \"curl -s %s | bash\" % (agent_pkg_deb)\n cmd += ' && apt-get -y -qq -o Dpkg::Options::=\"--force-confold\"' \\\n ' install telegraf'\n elif dist.strip().lower().startswith(\"debian\"):\n cmd = \"curl -s %s | bash\" % (agent_pkg_deb)\n cmd += ' && apt-get -o Dpkg::Options::=\"--force-confnew\"' \\\n ' -y install telegraf'\n elif dist.strip().startswith((\"openSUSE\", \"SUSE Linux Enterprise Server\",\n \"SLES\")):\n cmd = \"curl -s %s | bash\" % (agent_pkg_rpm)\n cmd += ' && zypper install telegraf'\n else:\n message.print_warn(\"Error: Unsupported OS version: %s.\" % (dist))\n\n return cmd\n\n\ndef tag_telegraf_config(comment, tags):\n \"\"\"Add custom tags into Telegraf.\"\"\"\n message.print_bold(\"Adding custom tags to Telegraf configuration\")\n\n tags_pre = \"- %s -\" % (comment)\n tags_post = \"- end %s tags - \" % (comment)\n tag_str = \" # %s\\n\" % (tags_pre)\n for key, value in list(tags.items()):\n tag_str += ' %s = \"%s\"\\n' % (key.lower(), value)\n tag_str += \" # %s\\n\" % (tags_post)\n try:\n tag_txt = open(\"tags.txt\", \"w\")\n tag_txt.write(tag_str)\n tag_txt.close()\n except IOError:\n message.print_warn(\"Error writing tags.txt: \" + sys.exc_info())\n return False\n\n # remove existing ec2 tags\n conf = CONF_PATH\n cmd = \"sed -i '/%s/,/%s/d' %s\" % (tags_pre, tags_post, conf)\n\n ret_code = system.run_command(cmd)\n if ret_code > 0:\n message.print_warn(\"Error adding tags to Telegraf configuration\")\n return False\n\n cmd = r\"sed -i '/\\[global_tags\\]/r tags.txt' %s\" % (conf)\n\n ret_code = system.run_command(cmd)\n if ret_code > 0:\n message.print_warn(\"Error overwriting telegraf.conf.\"\n \" Is the file located at \" + conf + \"? 
\")\n\n message.print_success(\"Finished adding tags to Telegraf configuration.\")\n return True\n\n\ndef install_agent():\n \"\"\"Install Telegraf.\"\"\"\n telegraf_conf = \"https://raw.githubusercontent.com/wavefrontHQ/\" \\\n \"integrations/master/telegraf/telegraf.conf\"\n message.print_bold(\"Starting Telegraf Installation!\")\n print(\"Downloading configuration to \", CONF_PATH)\n\n cmd = \"mkdir -p /etc/telegraf && sudo curl -o %s %s\"\\\n % (CONF_PATH, telegraf_conf)\n ret_code = system.run_command(cmd)\n if ret_code > 0:\n message.print_warn(\"Error downloading Telegraf config file.\")\n return False\n\n cmd = get_install_agent_cmd()\n print(\"Running \", cmd)\n ret_code = system.run_command(cmd)\n if ret_code > 0:\n message.print_warn(\"Error installing Telegraf\")\n return False\n\n message.print_success(\"Finished Installing Telegraf!\")\n message.print_success(\"The Telegraf configuration file can be found\"\n \" at /etc/telegraf/telegraf.conf\")\n return True\n","repo_name":"skajagar/wavefront-cli","sub_path":"wavefront_cli/lib/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"38188363543","text":"\nimport os\nimport magic\nimport ffmpy\nimport operator\n\n\nIMAGE = 1\nVIDEO = 2\nAUDIO = 3\nTEXT = 4\nAPPLICATION = 5\n \nNOT_SUPPORTED = 99\n\ndef get_mime_type(path):\n \n TYPE = 0\n SUBTYPE = 1\n\n mime_type = magic.from_file(path, mime=True).split('/')\n \n if mime_type[TYPE] == 'image': \n return IMAGE\n \n elif mime_type[TYPE] == 'video':\n return VIDEO\n \n elif mime_type[TYPE] == 'audio':\n return AUDIO\n \n elif mime_type[TYPE] == 'application' and mime_type[SUBTYPE] == 'octet-stream': ## TODO: also include extensions\n return AUDIO\n \n elif mime_type[TYPE] == 'text':\n return TEXT\n \n else:\n return NOT_SUPPORTED\n\n\nclass media_converter:\n\n #def __init__(self):\n # \n\n def __add_to(self, mime_type_sizes, tag, value):\n \n if tag in mime_type_sizes: \n mime_type_sizes[tag] = mime_type_sizes[tag] + value\n else:\n mime_type_sizes[tag] = value\n\n return mime_type_sizes\n \n \n def __get_top_tag(self, mime_type_sizes):\n \n mime_type_sizes_sorted = sorted(mime_type_sizes.items(), key=operator.itemgetter(1))\n\n if len(mime_type_sizes_sorted) > 0:\n return mime_type_sizes_sorted[0][0]\n else:\n return None\n \n def mime_type(self, source_path):\n \n mime_type_sizes = {}\n \n if os.path.isdir(source_path):\n for subdir, dirs, files in os.walk(source_path):\n \n for file in files:\n file_path = os.path.join(subdir,file)\n mime_type = get_mime_type(file_path)\n mime_type_sizes = self.__add_to(mime_type_sizes, mime_type, os.path.getsize(file_path))\n \n return self.__get_top_tag(mime_type_sizes)\n \n else:\n return get_mime_type(file_path)\n\n def __set_destination_ext(self, mime_type, path):\n\n if mime_type == IMAGE: \n return os.path.splitext(path)[0] + '.jpg'\n elif mime_type == VIDEO:\n return os.path.splitext(path)[0] + '.mp4'\n elif mime_type == AUDIO:\n return os.path.splitext(path)[0] + '.mp3'\n else:\n return path \n \n def convert(self, path):\n\n FFMPEG_PATH = 'C:\\\\Program Files\\\\ffmpeg\\\\bin\\\\ffmpeg.exe'\n\n for subdir, dirs, files in os.walk(path, topdown=False):\n for file in files:\n \n src = os.path.join(subdir, file)\n type = get_mime_type(src)\n \n if type in [IMAGE, VIDEO, AUDIO]: \n dst = self.__set_destination_ext(type, src)\n \n if os.path.splitext(src)[1] != os.path.splitext(dst)[1]:\n ff = ffmpy.FFmpeg(executable=FFMPEG_PATH, inputs={src: None}, outputs={dst: '-y'})\n ff.run()\n os.remove(src)\n else: \n os.remove(src)","repo_name":"FatboyHomeNetwork/media_library","sub_path":"library/src/media_converter.py","file_name":"media_converter.py","file_ext":"py","file_size_in_byte":3134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"71376078633","text":"import json\nimport io\n\nimport avro.schema\nfrom avro.io import DatumWriter\nfrom kafka import KafkaProducer\n\nconfig = json.load(open(\"../../globalConfig.json\"))\n\nTOPIC_NAME = config[\"topics\"][\"aircraftCapacities\"]\nBROKER_URLS = config[\"brokerUrls\"]\n\nproducer = KafkaProducer(bootstrap_servers=BROKER_URLS, key_serializer=str.encode)\n\nschema = avro.schema.parse(open(\"aircraftCapacity.avsc\").read())\n\nfor aircraft_type, capacity in json.load(open(\"./aircraftTypesTable.json\")).items():\n writer = DatumWriter(schema)\n bytes_writer = io.BytesIO()\n encoder = avro.io.BinaryEncoder(bytes_writer)\n writer.write({\"aircraftType\": aircraft_type, \"capacity\": capacity}, encoder)\n raw_bytes = bytes_writer.getvalue()\n producer.send(TOPIC_NAME, raw_bytes, key=aircraft_type)\n\nproducer.flush()\n\nprint(\"success!\")\n","repo_name":"ayakudere/arrivals-departures","sub_path":"utility/aircraft-capacity/writeToKafka.py","file_name":"writeToKafka.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"17484482736","text":"import logging\r\nimport json\r\nfrom argparse import ArgumentParser\r\nfrom datetime import timedelta\r\nfrom functools import partial\r\nfrom ipaddress import IPv4Interface, ip_interface, IPv4Address\r\nfrom pathlib import PurePosixPath, Path\r\nfrom time import time\r\n\r\nfrom deployerlib.cluster import Cluster\r\nfrom deployerlib.ha import HA\r\nfrom deployerlib.pg import PG\r\nfrom deployerlib.vm import VBox\r\n\r\n\r\nclass PgHaVm(HA, PG, VBox):\r\n def __init__(self, **kwargs):\r\n super(PgHaVm, self).__init__(**kwargs)\r\n\r\n def pgha_deploy_db(self, demo_db_file):\r\n local_db_file = Path(demo_db_file)\r\n db_file_name = local_db_file.name\r\n remote_db_file = PurePosixPath(\"/tmp\") / db_file_name\r\n self.sftp_put(local_db_file, remote_db_file, self.pg_user)\r\n db = local_db_file.stem\r\n self.ssh_run_check(\r\n f\"cd /tmp && tar -xf {db_file_name} && rm -f {db_file_name}\",\r\n user=self.pg_user)\r\n self.pg_drop_db(db)\r\n self.pg_create_db(db)\r\n self.ssh_run_check(\r\n f\"cd /tmp/{db} && {self.psql} -p {self.pg_port} -v ON_ERROR_STOP=1 \"\r\n f\"-t -q -f install.sql {db}\",\r\n user=self.pg_user)\r\n self.ssh_run_check(f'rm -rf /tmp/{db}')\r\n\r\n\r\nclass PgHaCluster(Cluster):\r\n\r\n def __init__(self, *, cluster_def, **kwargs):\r\n super(PgHaCluster, self).__init__(\r\n cluster_def=cluster_def, vm_class=PgHaVm, **kwargs)\r\n self.master = None\r\n self.demo_db = cluster_def[\"demo_db\"]\r\n self.pgha_file = cluster_def[\"pgha_file\"]\r\n self.pgha_resource = cluster_def[\"pgha_resource\"]\r\n self.pgha_resource_master = f\"{self.pgha_resource}-master\"\r\n self.pgha_resource_master_ip = f\"{self.pgha_resource_master}-ip\"\r\n self.virtual_ip = cluster_def[\"virtual_ip\"]\r\n\r\n def deploy(self):\r\n self.deploy_base()\r\n self.pgha_put_pgha_on_nodes()\r\n self.pgha_setup_master()\r\n self.pgha_setup_slaves()\r\n self.pgha_setup_ra()\r\n\r\n @property\r\n def standbies(self):\r\n return [vm for vm in self.vms if vm != self.master]\r\n\r\n def pgha_put_pgha_on_nodes(self):\r\n remote_ra = \"/usr/lib/ocf/resource.d/heartbeat/pgha\"\r\n for vm in self.vms:\r\n vm.sftp_put(self.pgha_file, remote_ra)\r\n vm.ssh_run_check(f\"chmod +x {remote_ra}\")\r\n\r\n def pgha_setup_master(self):\r\n self.master = self.vms[0]\r\n master = self.master\r\n master.pg_start()\r\n master.pgha_deploy_db(self.demo_db)\r\n master.pg_create_replication_user()\r\n hba_file = master.pg_hba_file\r\n for vm in self.vms:\r\n cmds = [\r\n f'echo \"host replication {h.pg_repl_user} {h.ip}/32 trust\" ' \r\n f'>> {hba_file}'\r\n for h in self.vms]\r\n vm.ssh_run_check(cmds, user=vm.pg_user)\r\n master.pg_make_master(self.vms)\r\n master.pg_restart()\r\n master.pg_add_replication_slots(self.standbies)\r\n\r\n def pgha_setup_slaves(self):\r\n master = self.master\r\n self.call([partial(m.pg_backup, master) for m in self.standbies])\r\n for vm in self.vms:\r\n if vm == master:\r\n vm.pg_write_recovery_conf()\r\n else:\r\n vm.pg_write_recovery_conf(master.name)\r\n self.call([partial(m.pg_start_stop) for m in self.standbies])\r\n master.pg_stop()\r\n\r\n def pgha_setup_ra(self):\r\n master = self.master\r\n master.ha_base_setup(self.vms)\r\n master.ha_set_migration_threshold(5)\r\n master.ha_set_resource_stickiness(10)\r\n master.ha_disable_stonith()\r\n self.pgha_configure_cib()\r\n\r\n def ha_get_vip_ipv4(self):\r\n if type(self.virtual_ip) is IPv4Interface:\r\n return self.virtual_ip\r\n if type(self.virtual_ip) is IPv4Address:\r\n return 
IPv4Interface(str(self.virtual_ip) + \"/24\")\r\n if \"/\" in self.virtual_ip:\r\n return ip_interface(self.virtual_ip)\r\n return IPv4Interface(self.virtual_ip + \"/24\")\r\n\r\n def pgha_configure_cib(self):\r\n master = self.master\r\n master.ha_get_cib()\r\n # pg_host: tcp or unix_socket_directories?\r\n if \"unix_socket_directories\" in master.pg_conf_dict:\r\n l = master.pg_conf_dict[\"unix_socket_directories\"].split(\",\")\r\n if len(l) > 0 and l[0]:\r\n pg_host = l[0]\r\n else:\r\n pg_host = \"localhost\"\r\n else:\r\n pg_host = \"/tmp\"\r\n # pgha\r\n master.ha_pcs_xml(\r\n f'resource create {self.pgha_resource} ocf:heartbeat:pgha '\r\n f'pgbindir={master.pg_bindir} '\r\n f'pgdata={master.pg_data_directory} '\r\n f'pgconf={master.pg_config_file} '\r\n f'pgport={master.pg_port} '\r\n f'pghost={pg_host} '\r\n f'op start timeout=60s '\r\n f'op stop timeout=60s '\r\n f'op promote timeout=120s '\r\n f'op demote timeout=120s '\r\n f'op monitor interval=5s timeout=10s role=\"Master\" '\r\n f'op monitor interval=6s timeout=10s role=\"Slave\" '\r\n f'op notify timeout=60s')\r\n master.ha_pcs_xml(\r\n f\"resource master {self.pgha_resource_master} {self.pgha_resource} \"\r\n f\"clone-max=10 notify=true\")\r\n # VIP\r\n ipv4 = self.ha_get_vip_ipv4()\r\n pgha_resource_master_ip = self.pgha_resource_master_ip\r\n master.ha_pcs_xml(\r\n f\"resource create {pgha_resource_master_ip} ocf:heartbeat:IPaddr2 \"\r\n f\"ip={ipv4.ip} cidr_netmask={ipv4.network.prefixlen}\")\r\n master.ha_pcs_xml(\r\n f\"constraint colocation add {pgha_resource_master_ip} \"\r\n f\"with master {self.pgha_resource_master} INFINITY\")\r\n master.ha_pcs_xml(\r\n f\"constraint order promote {self.pgha_resource_master} \"\r\n f\"then start {pgha_resource_master_ip}\")\r\n master.ha_cib_push()\r\n\r\n\r\ndef parse_args():\r\n parser = ArgumentParser(description='Deploy a cluster')\r\n parser.add_argument(\"json_file\", help=\"Cluster definition (JSON)\")\r\n parser.add_argument('--use-threads', action='store_true', default=True)\r\n return parser.parse_args()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n args = parse_args()\r\n start = time()\r\n with open(args.json_file) as f:\r\n cluster = PgHaCluster(\r\n cluster_def=json.load(f), use_threads=args.use_threads)\r\n cluster.deploy()\r\n logging.getLogger(\"main\").debug(f\"took {timedelta(seconds=time() - start)}\")\r\n","repo_name":"ulodciv/cluster_deployer","sub_path":"src/pgha_deployer.py","file_name":"pgha_deployer.py","file_ext":"py","file_size_in_byte":6541,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"7221465712","text":"import pygame, sys\ndef draw_floor(): \n screen.blit(floor,(floor_x_pos,600))\n screen.blit(floor,(floor_x_pos+432,600))\n#\npygame.init()\nscreen= pygame.display.set_mode((432,768))\nclock = pygame.time.Clock()\ngravity = 0.25\nbird_movement = 0\n#create background\nbg = pygame.image.load('assets/background-night.png').convert()\nbg = pygame.transform.scale2x(bg)\n#create floor\nfloor = pygame.image.load('assets/floor.png').convert()\nfloor = pygame.transform.scale2x(floor)\nfloor_x_pos = 0\n#create bird\nbird= pygame.image.load('assets/yellowbird-midflap.png').convert()\nbird= pygame.transform.scale2x(bird)\nbird_rect = bird.get_rect(center = (100,384))\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n bird_movement = 0\n bird_movement = -11\n screen.blit(bg,(0,0))\n bird_movement += gravity\n bird_rect.centery += bird_movement\n screen.blit(bird,bird_rect)\n draw_floor()\n if floor_x_pos <= -432:\n floor_x_pos =0\n floor_x_pos -=1\n pygame.display.update()\n clock.tick(120)\n","repo_name":"Dylan1102/flappy-bird","sub_path":"suzy.py","file_name":"suzy.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"34406266586","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n@description: \n\n@author: BaoQiang\n@time: 2017/7/17 17:07\n\"\"\"\n\nfrom nltk.tokenize import word_tokenize, sent_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nfrom nltk.corpus import state_union\nfrom nltk.tokenize import PunktSentenceTokenizer\nimport nltk\nfrom nltk.stem import WordNetLemmatizer\n\n\"\"\"\nnltk.download()\nnltk.download('averaged_perceptron_tagger')\nnltk.download('maxent_ne_chunker')\n\"\"\"\n\n# nltk.download()\n\ndef run7():\n lemmatizer = WordNetLemmatizer()\n for word in ['cats','better','python','best','ran']:\n print(lemmatizer.lemmatize(word))\n\n print(lemmatizer.lemmatize('better',pos='a'))\n\ndef run6():\n train_text = state_union.raw('2005-GWBush.txt')\n sample_text = state_union.raw('2006-GWBush.txt')\n\n custom_sent_tokenizer = PunktSentenceTokenizer(train_text)\n tokenized = custom_sent_tokenizer.tokenize(sample_text)\n\n def process_content():\n for i in tokenized[:3]:\n try:\n words = nltk.word_tokenize(i)\n tagged = nltk.pos_tag(words)\n\n named_entity = nltk.ne_chunk(tagged)\n named_entity.draw()\n\n except Exception as e:\n print(e)\n\n process_content()\n\ndef run5():\n train_text = state_union.raw('2005-GWBush.txt')\n sample_text = state_union.raw('2006-GWBush.txt')\n\n custom_sent_tokenizer = PunktSentenceTokenizer(train_text)\n tokenized = custom_sent_tokenizer.tokenize(sample_text)\n\n def process_content():\n for i in tokenized[:3]:\n try:\n words = nltk.word_tokenize(i)\n tagged = nltk.pos_tag(words)\n\n chunked_gram = \"\"\"Chunk: {<.*>+}\n }+{\"\"\"\n chunked_parser = nltk.RegexpParser(chunked_gram)\n chunked = chunked_parser.parse(tagged)\n\n # print(tagged)\n # print(chunked)\n chunked.draw()\n except Exception as e:\n print(e)\n\n process_content()\n\ndef run4():\n train_text = state_union.raw('2005-GWBush.txt')\n sample_text = state_union.raw('2006-GWBush.txt')\n\n custom_sent_tokenizer = PunktSentenceTokenizer(train_text)\n tokenized = custom_sent_tokenizer.tokenize(sample_text)\n\n def process_content():\n for i in tokenized:\n try:\n words = nltk.word_tokenize(i)\n tagged = nltk.pos_tag(words)\n\n chunked_gram = \"\"\"Chunk: {***}\"\"\"\n chunked_parser = nltk.RegexpParser(chunked_gram)\n chunked = chunked_parser.parse(tagged)\n\n # print(tagged)\n # print(chunked)\n chunked.draw()\n except Exception as e:\n print(e)\n\n process_content()\n\n\ndef run3():\n ps = PorterStemmer()\n example_words = ['python', 'pythoner', 'pythoning', 'pythoned', 'pythonly', ]\n for w in example_words:\n print(ps.stem(w))\n\n new_text = 'It is very important to be pythonly while you are pythoning with python. All pythoners have pythoned poorly at least once.'\n for w in word_tokenize(new_text):\n print(ps.stem(w))\n\n\ndef run2():\n example_text = 'This is a example showing off stop word filtration.'\n\n stop_words = set(stopwords.words('english'))\n # for word in stop_words:\n # print(word)\n\n filterd_sentences = [word for word in word_tokenize(example_text) if word not in stop_words]\n for item in filterd_sentences:\n print(item)\n\n\ndef run():\n example_text = 'Hello Mr. Smith, how are you doing today? The weather is great and Python is awesome. The sky is pinkish-blue. 
You should not eat cardboard.'\n # example_text = '我饿了,我想吃东西。'\n\n for word in word_tokenize(example_text):\n print(word)\n\n for sent in sent_tokenize(example_text):\n print(sent)\n\n\ndef main():\n # run()\n # run2()\n # run3()\n # run4()\n # run5()\n # run6()\n run7()\n\n\nif __name__ == '__main__':\n main()\n\n'''\nPOS tag list:\nCC\tcoordinating conjunction\nCD\tcardinal digit\nDT\tdeterminer\nEX\texistential there (like: \"there is\" ... think of it like \"there exists\")\nFW\tforeign word\nIN\tpreposition/subordinating conjunction\nJJ\tadjective\t'big'\nJJR\tadjective, comparative\t'bigger'\nJJS\tadjective, superlative\t'biggest'\nLS\tlist marker\t1)\nMD\tmodal\tcould, will\nNN\tnoun, singular 'desk'\nNNS\tnoun plural\t'desks'\nNNP\tproper noun, singular\t'Harrison'\nNNPS\tproper noun, plural\t'Americans'\nPDT\tpredeterminer\t'all the kids'\nPOS\tpossessive ending\tparent's\nPRP\tpersonal pronoun\tI, he, she\nPRP$\tpossessive pronoun\tmy, his, hers\nRB\tadverb\tvery, silently,\nRBR\tadverb, comparative\tbetter\nRBS\tadverb, superlative\tbest\nRP\tparticle\tgive up\nTO\tto\tgo 'to' the store.\nUH\tinterjection\terrrrrrrrm\nVB\tverb, base form\ttake\nVBD\tverb, past tense\ttook\nVBG\tverb, gerund/present participle\ttaking\nVBN\tverb, past participle\ttaken\nVBP\tverb, sing. present, non-3d\ttake\nVBZ\tverb, 3rd person sing. present\ttakes\nWDT\twh-determiner\twhich\nWP\twh-pronoun\twho, what\nWP$\tpossessive wh-pronoun\twhose\nWRB\twh-abverb\twhere, when\n'''\n","repo_name":"xiaoaxe/xiao-youtube-ml","sub_path":"awesomeml/ch03/sec01_nltk.py","file_name":"sec01_nltk.py","file_ext":"py","file_size_in_byte":5148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"9723439910","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport core.models\nimport datetime\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('core', '0008_auto_20170409_2338'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('content', models.TextField(max_length=200)),\n ('pub_date', models.DateTimeField(auto_now_add=True)),\n ],\n ),\n migrations.CreateModel(\n name='Event',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('title', models.TextField(max_length=50)),\n ('date', models.DateField(default=datetime.date.today)),\n ('time', models.TimeField(default=datetime.datetime.now)),\n ('venue', models.TextField(max_length=50)),\n ('description', models.TextField(max_length=500, blank=True, default='')),\n ('icon', models.ImageField(default='users\\\\default_icon.jpg', upload_to=core.models.get_image_path)),\n ('pub_date', models.DateTimeField(auto_now_add=True)),\n ('user', models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='EventAttendees',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('event', models.ForeignKey(null=True, to='core.Event')),\n ('user', models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='EventGallery',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('pic', models.ImageField(blank=True, null=True, upload_to=core.models.get_image_path)),\n ('pub_date', models.DateTimeField(auto_now_add=True)),\n ('event', models.ForeignKey(null=True, to='core.Event')),\n ('user', models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.AddField(\n model_name='comment',\n name='event',\n field=models.ForeignKey(null=True, to='core.Event'),\n ),\n migrations.AddField(\n model_name='comment',\n name='user',\n field=models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL),\n ),\n ]\n","repo_name":"anmoljindal/alumni_support_system","sub_path":"core/migrations/0009_auto_20170416_1504.py","file_name":"0009_auto_20170416_1504.py","file_ext":"py","file_size_in_byte":2881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"24710236996","text":"n = int(input())\ndata_set = list(map(int, input().split()))\nminVal = 1e9\nmaxVal = -1e9\n\nfor data in data_set:\n minVal = min(minVal, data)\n maxVal = max(maxVal, data)\n\nprint(minVal, maxVal)\n","repo_name":"wide-world/algorithm-code","sub_path":"BaekJoon/Python/10818_최소,최대.py","file_name":"10818_최소,최대.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"18125859349","text":"#! /usr/bin/env python\nimport csv\nimport json\n\nfrom pathlib import Path\nfrom dl_envs.pursuit.pursuit_env import Action\n\n\nHUNTERS = 4\nPREYS = 1\nFIELD_DIMS = (10, 10)\nEXTRA = ''\nMIN_EPS_RECORD = 2500\nTIMESTAMP = ''\n\n\ndef convert_action(action: int) -> str:\n\t\n\tif action == Action.UP:\n\t\treturn \"Up\"\n\telif action == Action.DOWN:\n\t\treturn \"Down\"\n\telif action == Action.LEFT:\n\t\treturn \"Left\"\n\telif action == Action.RIGHT:\n\t\treturn \"Right\"\n\telse:\n\t\treturn \"Stay\"\n\n\nhistory_dir = (Path(__file__).parent.absolute().parent.absolute() / 'models' / 'pursuit_dqn' /\n\t\t\t ('%dx%d-field%s' % (FIELD_DIMS[0], FIELD_DIMS[1], EXTRA)) / ('%d-hunters' % HUNTERS)) / ('%s' % TIMESTAMP)\nhistory_file = history_dir / ('%d-catch.json' % HUNTERS)\nprocess_history_file = history_dir / ('%d-catch_proccessed.csv' % HUNTERS)\nprocessed_history = []\nheader = ['episode'] + ['hunter_%d_pos' % (idx + 1) for idx in range(HUNTERS)] + ['prey_pos'] + ['hunter_%d_action' % (idx + 1) for idx in range(HUNTERS)]\n\nwith open(history_file, 'r') as f:\n\thistory = json.load(f)\n\nwith open(process_history_file, 'w') as csv_file:\n\twriter = csv.writer(csv_file)\n\twriter.writerow(header)\n\tepisode_nr = 1\n\tfor episode in history:\n\t\tif episode_nr >= MIN_EPS_RECORD:\n\t\t\tfor entry in episode:\n\t\t\t\tstate_parse = entry[0].split(\" \")\n\t\t\t\trow = ([episode_nr] + [(state_parse[idx], state_parse[idx+1]) for idx in range(0, len(state_parse), 5)] +\n\t\t\t\t\t [convert_action(int(entry[idx])) for idx in range(1, len(entry), 2)])\n\t\t\t\twriter.writerow(row)\n\t\tepisode_nr += 1\n\n","repo_name":"miguel-faria/deep_rl","sub_path":"src/pursuit_process_hist.py","file_name":"pursuit_process_hist.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"12407640363","text":"from bit.bit import Bit\nfrom nums.arithmetic import Arithmetic\nfrom nums.bit_operation import BitOperation\n\n\nclass Integer:\n \"\"\"\n Integer의 메모리 구조\n +--------------+----------------------------------------------------------------------+\n | sign (1 bit) | field (31 bit) |\n +--------------+----------------------------------------------------------------------+\n\n 32 bit로 이루어진 integer 값\n\n 양수 음수를 Sign bit를 통해 구분\n field 값을 통해 -(2**30-1)부터 2**30-1까지의 값을 표현함\n \"\"\"\n bit_len = 32\n field_len = bit_len - 1\n limit = 2**bit_len\n frame = [2**i for i in range(bit_len-2, -1, -1)]\n\n def __init__(self, bits: list or str or int = None, sign: Bit = Bit()):\n if type(bits) == str:\n res = self.str_to_int(bits)\n self.sign = res.sign\n self.bits = res.bits\n elif type(bits) == int:\n self.sign = sign\n self.bits = BitOperation.empty_bits(self.field_len)\n self._set(bits)\n elif type(bits) == list:\n self.sign = sign\n self.bits = bits\n else:\n self.sign = sign\n self.bits = BitOperation.empty_bits(self.field_len)\n\n def is_zero(self) -> bool:\n \"\"\"\n 모든 비트가 0인지 확인하는 함수\n :return: 모든 비트가 0인지 여부\n \"\"\"\n return not self.sign and BitOperation.is_empty(self.bits)\n\n @classmethod\n def max_value(cls) -> \"Integer\":\n \"\"\"\n Integer 의 최대값\n :return: Integer 의 최대값 (2**32 - 1)\n \"\"\"\n max_list = [Bit(True) for _ in range(cls.field_len)]\n sign = Bit()\n return Integer(max_list, sign)\n\n @classmethod\n def min_value(cls) -> \"Integer\":\n \"\"\"\n Integer 의 최소값\n :return: Integer 의 최소값 (-(2**32 - 1))\n \"\"\"\n min_list = [Bit(True) for _ in range(cls.field_len)]\n sign = Bit(True)\n return Integer(min_list, sign)\n\n def _set(self, _int: int):\n \"\"\"\n int 값을 통해 integer 를 받기 위한 함수\n\n int 값을 Bit를 통해 int로 어떻게 표현하는 지 로직 확인을 위한 함수\n deprecated\n \"\"\"\n if _int < 0:\n self.sign = Bit(True)\n _int = -_int\n else:\n self.sign = Bit()\n _int = _int % self.limit\n for i, x in enumerate(self.frame):\n self.bits[i].set(bool(_int & x))\n\n @classmethod\n def str_to_int(cls, val: str) -> \"Integer\":\n \"\"\"\n String 값을 통해 integer 값 read\n :param val: String 으로 표현된 정수 값 (공백이 없다는 가정)\n :return: Integer 의 값\n \"\"\"\n if val[0] == '-':\n sign = Bit(True)\n val = val[1:]\n else:\n sign = Bit()\n\n bits = Arithmetic.str_to_integer(val, cls.field_len)\n if len(bits) > cls.field_len:\n sign ^= bits[-cls.field_len-1]\n\n bits = BitOperation.fit_bits(bits, cls.field_len)\n return Integer(bits, sign)\n\n @classmethod\n def char_to_dec(cls, val: str) -> list:\n \"\"\"\n character 0-10의 값으로 읽음\n :param val: 0-10의 문자열\n :return: Integer 객체\n \"\"\"\n return BitOperation.fit_bits(BitOperation.num_map[val], cls.field_len)\n\n def val(self) -> int:\n \"\"\"\n bit 들로 이루어진 값을 int 값으로 읽을 수 있도록 만드는 함수\n 음수 확인은 signed bit를 통해 확인\n\n 테스트 및 출력을 위해 사용하는 함수\n :return: int 값으로 리턴\n \"\"\"\n res = BitOperation.binary_to_decimal(self.bits)\n if self.sign:\n return -res\n return res\n\n def __str__(self) -> str:\n return str(self.val())\n\n def is_negative(self) -> Bit:\n \"\"\"\n Sign 비트를 통해 음수 확인\n :return: 음수인지 여부\n \"\"\"\n return self.sign\n\n def __invert__(self) -> \"Integer\":\n \"\"\"\n Bit invert 연산( ~ )을 위한 operator overloading\n :return: 새로운 Integer 객체로 return\n \"\"\"\n bits = [~bit for bit in self.bits]\n return Integer(bits, self.sign)\n\n def __neg__(self) -> \"Integer\":\n \"\"\"\n sign minus 연산( - )을 위한 operator overloading\n :return: 새로운 Integer 객체로 return\n \"\"\"\n return Integer(self.bits[::], ~self.sign)\n\n def __add__(self, other: \"Integer\") -> 
\"Integer\":\n \"\"\"\n Binary Add 연산 ( + )을 위한 operator overloading\n 음수는 2의 보수로 변환하여 계산\n :param other: Integer 타입 가정\n :return: 새로운 Integer 객체로 return\n \"\"\"\n if self.is_negative():\n a_bits, _ = Arithmetic.complement_bits(self.bits, self.field_len)\n else:\n a_bits = self.bits[::]\n if other.is_negative():\n b_bits, _ = Arithmetic.complement_bits(other.bits, self.field_len)\n else:\n b_bits = other.bits[::]\n\n res, overflow = Arithmetic.add_bits(a_bits, b_bits, self.field_len)\n sign = self.sign ^ other.sign ^ overflow\n if sign:\n return Integer(Arithmetic.decomplement_bits(res, self.field_len), sign)\n return Integer(res, sign)\n\n def __sub__(self, other: \"Integer\") -> \"Integer\":\n \"\"\"\n Binary Sub 연산 ( - )을 위한 operator overloading\n 음수로 변경한 후 add 연산\n :param other: Integer 타입 가정\n :return: 새로운 Integer 객체로 return\n \"\"\"\n return self + (-other)\n\n def __mul__(self, other: \"Integer\") -> \"Integer\":\n \"\"\"\n Binary Mul 연산 ( * )을 위한 operator overloading\n 덧셈의 반복으로 해결\n :param other: Integer 타입 가정\n :return: 새로운 Integer 객체로 return\n \"\"\"\n return Integer(Arithmetic.mul_bits(self.bits, other.bits, self.field_len), self.sign ^ other.sign)\n\n def __truediv__(self, other: \"Integer\") -> \"Integer\":\n \"\"\"\n Binary Div 연산 ( / )을 위한 operator overloading\n 최고 자리수부터 shift 연산을 통해 뺄셈의 반복으로 해결\n :param other: Integer 타입 가정\n :return: 새로운 Integer 객체로 return\n \"\"\"\n return Integer(Arithmetic.div_bits(self.bits, other.bits, self.field_len), self.sign ^ other.sign)\n\n def __le__(self, other: \"Integer\") -> bool:\n \"\"\"\n Low Equal 연산 ( <= )을 위한 operator overloading\n :param other: Integer 타입 가정\n :return: 새로운 Integer 객체로 return\n \"\"\"\n if self.sign != other.sign:\n return self.sign == Bit()\n\n if self.is_negative():\n return BitOperation.le_bits(other.bits, self.bits, self.field_len)\n return BitOperation.le_bits(self.bits, other.bits, self.field_len)\n\n def __eq__(self, other: \"Integer\") -> bool:\n return self.sign == other.sign and BitOperation.eq_bits(self.bits, other.bits, self.field_len)\n\n def __and__(self, other: \"Integer\") -> \"Integer\":\n \"\"\"\n Bit And 연산( & )을 위한 operator overloading\n :param other: Integer 타입 가정\n :return: 새로운 Integer 객체로 return\n \"\"\"\n return Integer(BitOperation.and_bits(self.bits, other.bits, self.field_len), self.sign & other.sign)\n\n def __xor__(self, other: \"Integer\") -> \"Integer\":\n \"\"\"\n Bit XOR 연산( ^ )을 위한 operator overloading\n :param other: Integer 타입 가정\n :return: 새로운 Integer 객체로 return\n \"\"\"\n return Integer(BitOperation.xor_bits(self.bits, other.bits, self.field_len), self.sign ^ other.sign)\n\n def __or__(self, other: \"Integer\") -> \"Integer\":\n \"\"\"\n Bit OR 연산( | )을 위한 operator overloading\n :param other: Integer 타입 가정\n :return: 새로운 Integer 객체로 return\n \"\"\"\n return Integer(BitOperation.or_bits(self.bits, other.bits, self.field_len), self.sign | other.sign)\n\n def __lshift__(self, num: int) -> \"Integer\":\n \"\"\"\n num 만큼 left shift ( << ) 연산을 위한 operator overloading\n :param num: shift 하는 크기\n :return: 새로운 Integer 객체로 return\n \"\"\"\n res, _ = BitOperation.lshift_bits(self.bits, num, self.field_len)\n return Integer(res)\n","repo_name":"HyeockJinKim/Study-CS","sub_path":"integer/integer.py","file_name":"integer.py","file_ext":"py","file_size_in_byte":8594,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"5949286514","text":"import pytest\n\nimport sys, os\nsys.path.append(os.path.realpath(os.path.dirname(__file__)+\"/..\"))\n\nimport numpy as np\n\n\ndef test_single_strat_call():\n\n from rivers2stratigraphy.gui import GUI\n from rivers2stratigraphy.strat import Strat\n\n gui = GUI()\n gui.strat = Strat(gui)\n\n gui.strat(i=0)\n\n assert len(gui.strat.activeChannelPatches) == 2\n assert not gui.strat.channelBodyList # list is empty\n\n\ndef test_convert_active_channel_to_channel_body():\n\n from rivers2stratigraphy.gui import GUI\n from rivers2stratigraphy.strat import Strat\n\n gui = GUI()\n gui.strat = Strat(gui)\n\n for i in range(int(gui.sm.Ta / gui.sm.dt)+2):\n gui.strat(i=i)\n\n assert len(gui.strat.channelBodyList) == 1\n\n\ndef test_gui_strat_call():\n\n from rivers2stratigraphy.gui import GUI\n from rivers2stratigraphy.strat import Strat\n\n np.random.seed(0)\n\n gui = GUI()\n gui.strat = Strat(gui)\n\n gui.strat(i=0)\n\n return gui.fig\n\n\ndef test_gui_convert_to_channel_body_call():\n\n from rivers2stratigraphy.gui import GUI\n from rivers2stratigraphy.strat import Strat\n\n np.random.seed(0)\n\n gui = GUI()\n gui.strat = Strat(gui)\n\n for i in np.arange(gui.sm.Ta / gui.sm.dt + 3):\n gui.strat(i=i)\n\n return gui.fig\n","repo_name":"sededu/rivers2stratigraphy","sub_path":"tests/test_r2s_strat.py","file_name":"test_r2s_strat.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"}
+{"seq_id":"4939339557","text":"\"\"\"\nFit scaling model to unmerged corrected intensities\n\"\"\"\n\nimport argparse\n\nimport numpy as np\nimport pandas as pd\n\nfrom mdx2.utils import saveobj, loadobj\nfrom mdx2.data import HKLTable\nfrom mdx2.scaling import ScaledData, ScalingModel\n\ndef parse_arguments():\n \"\"\"Parse commandline arguments\"\"\"\n\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n\n # Required arguments\n parser.add_argument(\"hkl\", help=\"NeXus file with hkl_table\")\n parser.add_argument('--smoothness',type=float,default=1,help=\"amount to multiply the regularization parameter\")\n parser.add_argument('--phi_increment',type=float,default=1,metavar=\"DEGREES\",help=\"spacing of phi control points in degrees\")\n parser.add_argument('--iter',type=int,default=5,help=\"number of iterations\")\n parser.add_argument(\"--outfile\", default=\"scales.nxs\", help=\"name of the output NeXus file\")\n\n return parser\n\ndef run(args=None):\n parser = parse_arguments()\n args = parser.parse_args(args)\n\n hkl = loadobj(args.hkl,'hkl_table')\n\n print('Grouping redundant observations')\n (h,k,l), index_map, counts = hkl.unique()\n\n S = ScaledData(hkl.intensity,hkl.intensity_error,index_map,hkl.phi)\n\n # for phi axis, just estimate the range to the nearest degree, and put the samples at one degree increments.\n # seems reasonable but might fail in some situations...\n dphi = args.phi_increment # one degree increments\n phi_min = np.floor(hkl.phi.min()*dphi)/dphi\n phi_max = np.ceil(hkl.phi.max()*dphi)/dphi\n nsamp = np.round((phi_max-phi_min)/dphi).astype(int) + 1\n phi_points = np.linspace(phi_min,phi_max,nsamp)\n phi_vals = np.ones_like(phi_points)\n\n print(f'initializing scaling model with {phi_points.size} samples')\n Model = ScalingModel(phi_points,phi_vals)\n\n for j in range(args.iter):\n print(f'iteration {j+1} of {args.iter}')\n print(' re-calculating scales')\n S.apply(Model)\n print(' merging')\n Im,sigmam,counts = S.merge()\n print(' fitting the model')\n Model,x2 = S.fit(Model,Im,args.smoothness)\n print(f' current x2: {x2}')\n\n print('finished refining')\n\n #hkl.intensity=S.I.filled(fill_value=np.nan).astype(np.float32)\n #hkl.intensity_error=S.sigma.filled(fill_value=np.nan).astype(np.float32)\n #hkl.scale = S.scale.astype(np.float32)\n\n #saveobj(hkl,args.outfile,name='hkl_table',append=False)\n saveobj(Model,args.outfile,name='scaling_model',append=False)\n\n print('done!')\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"ando-lab/mdx2","sub_path":"mdx2/command_line/scale.py","file_name":"scale.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"22705998789","text":"import os\nimport time\n\nimport torch\nimport torchvision\nimport pro_gan_pytorch.PRO_GAN as pg\n\nfrom dataset import TextDataset\n\nPARENT_DIR = os.path.dirname(__file__)\n\n# select the device to be used for training\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nckpt_dir = 'ckpt-coco'\n\ndef setup_data(batch_size, size):\n \"\"\"\n setup the dataset for training the CNN\n :param batch_size: batch_size for sgd\n :return: classes, trainloader, testloader => training and testing data loaders\n \"\"\"\n\n transform = torchvision.transforms.Compose([\n torchvision.transforms.Resize((128, 128)),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize((0.5,)*3, (0.5,)*3),\n ])\n\n trainset = TextDataset(path=os.path.join(PARENT_DIR, 'coco_captions.txt'), size=size, transform=transform)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,\n shuffle=True,\n num_workers=4)\n\n # testset = torchvision.datasets.CIFAR10(root=data_path,\n # transform=transforms, train=False,\n # download=False)\n # testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,\n # shuffle=True,\n # num_workers=num_workers)\n\n return trainloader#, testloader\n\n\ndef save_checkpoint(pro_gan, current_depth, epoch, ckpt_dir):\n state = {\n 'gen':pro_gan.gen.state_dict(),\n 'gen_optim':pro_gan.gen_optim.state_dict(),\n 'dis':pro_gan.dis.state_dict(),\n 'dis_optim':pro_gan.dis_optim.state_dict(),\n 'current_depth': current_depth,\n 'epoch': epoch,\n }\n os.makedirs(os.path.join(PARENT_DIR, ckpt_dir), exist_ok=True)\n basename = 'checkpoint-{}-{}.ckpt'.format(current_depth, epoch)\n filename = os.path.join(PARENT_DIR, ckpt_dir, basename)\n torch.save(state, filename)\n checkpoint_file = os.path.join(PARENT_DIR, ckpt_dir, 'checkpoint')\n with open(checkpoint_file, 'w') as f:\n f.write(basename)\n print('Saved {}'.format(basename))\n\n\ndef load_checkpoint(ckpt_dir):\n checkpoint_file = os.path.join(PARENT_DIR, ckpt_dir, 'checkpoint')\n if not os.path.isfile(checkpoint_file):\n return None\n with open(checkpoint_file) as f:\n basename = f.read()\n filename = os.path.join(PARENT_DIR, ckpt_dir, basename)\n state = torch.load(filename)\n print('Loaded {}'.format(basename))\n return state\n\n\nif __name__ == '__main__':\n\n # some parameters:\n depth = 6\n # num_epochs = 100 # number of epochs per depth (resolution)\n num_epochs = 10 # number of epochs per depth (resolution)\n latent_size = 512\n\n # get the data. 
Ignore the test data and their classes\n # _, train_data_loader, _ = setup_data(batch_size=32, num_workers=3, download=True)\n train_data_loader = setup_data(batch_size=128, size=128000)\n\n # ======================================================================\n # This line creates the PRO-GAN\n # ======================================================================\n pro_gan = pg.ProGAN(depth=depth, latent_size=latent_size, device=device)\n # ======================================================================\n\n start = time.time()\n\n # train the pro_gan using the cifar-10 data\n for current_depth in range(depth):\n # print(\"working on depth:\", current_depth)\n\n # note that the rest of the api indexes depth from 0\n for epoch in range(1, num_epochs + 1):\n # print(\"\\ncurrent_epoch: \", epoch)\n\n # calculate the value of aplha for fade-in effect\n # alpha = epoch / num_epochs\n # print(\"value of alpha:\", alpha)\n\n # iterate over the dataset in batches:\n for i, batch in enumerate(train_data_loader, 1):\n alpha = (epoch - 1 + i / len(train_data_loader)) / num_epochs\n\n images = batch\n images = images.to(device)\n # generate some random noise:\n noise = torch.randn(images.shape[0], latent_size).to(device)\n\n # optimize discriminator:\n dis_loss = pro_gan.optimize_discriminator(noise, images, current_depth, alpha)\n\n # optimize generator:\n gen_loss = pro_gan.optimize_generator(noise, current_depth, alpha)\n\n end = time.time()\n delay = end - start\n start = end\n\n print(\"Depth: %d Epoch: %d Batch: %d dis_loss: %.3f gen_loss: %.3f time: %.3f\"\n % (current_depth, epoch, i, dis_loss, gen_loss, delay))\n\n # print(\"epoch finished ...\")\n if epoch % 1 == 0:\n save_checkpoint(pro_gan, current_depth, epoch, ckpt_dir)\n print(\"training complete ...\")\n save_checkpoint(pro_gan, current_depth, epoch, ckpt_dir)\n","repo_name":"bshimanuki/imagetext_pgan","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"17874390168","text":"from django.urls import path\n\nfrom customers import views\n\n\napp_name = 'customers'\n\n\nurlpatterns = [\n path('', views.index, name='home'),\n path('user/login/', views.user_login, name='login'),\n path('user/logout/', views.user_logout, name='logout'),\n path('user/register/', views.user_register, name='registration'),\n path('user/profile/edit/', views.profile_edit, name='profile_edit'),\n path('user/profile/delete/', views.profile_delete, name='profile_delete'),\n path('user/profile//', views.profile_detail, name='profile_detail'),\n\n]\n","repo_name":"SergeyGumenuk/helpdesk","sub_path":"helpdesk/customers/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"41903570910","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\nn = int(input())\nalpha=list(input().split())\nk = int(input())\nfrom itertools import combinations\nlis = list(combinations(alpha,k))\ncount = 0\nfor i in range(len(lis)):\n if 'a' in lis[i]:\n count+=1\n\nj = len(lis)\nprint(count/j)\n \n","repo_name":"Makda-Yoseph/computative-programming","sub_path":"probability_of_a.py","file_name":"probability_of_a.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"10058914988","text":"from tkinter import *\n\ndef calculated_button_clicked():\n km = float(entry.get()) / 0.6214\n num_km_label[\"text\"] = f\"{km}\"\n\n#Window\nwindow = Tk()\nwindow.title(\"Mile to Km Converter\")\nwindow.minsize(width=300, height= 150)\nwindow.config(padx=20, pady=20)\n\n#Entry\nentry = Entry(width=20)\nentry.grid(column=1, row= 0)\n\n#Label\nmiles_label = Label()\nmiles_label[\"text\"] = \"Miles\"\nmiles_label.grid(column=2, row= 0)\n\nis_equal_to_label = Label()\nis_equal_to_label[\"text\"] = \"is equal to\"\nis_equal_to_label.grid(column=0, row=1)\n\nnum_km_label = Label()\nnum_km_label[\"text\"] = \"0\"\nnum_km_label.grid(column=1, row=1)\n\nkm_label = Label()\nkm_label[\"text\"] = \"Km\"\nkm_label.grid(column=2, row=1)\n\n#Button\nbutton = Button(text=\"Calculate\", command=calculated_button_clicked)\nbutton.grid(column=1, row=2)\n\nwindow.mainloop()","repo_name":"edgaradrian/PythonStudy","sub_path":"100 Days of Code/Day27/mile-to-kilometer-converter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"39569067722","text":"n = int(input())\np = list(map(int, input().split()))\nchilds = [0]*n\nfor i in range(n):\n\tif p[i] != -1:\n\t\tchilds[p[i]] += 1\nk = int(input())\nif p[k] != -1: childs[p[k]] -= 1\np[k] = -2\ncnt = 0\nfor i in range(n):\n\tif childs[i] != 0: continue\n\t#\tget root\n\tt = p[i]\n\twhile t >= 0: t = p[t]\n\tif t == -1: cnt += 1\nprint(cnt)\n","repo_name":"hachira/algbootcamp2020","sub_path":"lab20-1/lab.py","file_name":"lab.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
+{"seq_id":"37578532355","text":"import inspect\nimport sys\nfrom io import StringIO\nfrom types import MethodType\n\nimport _pytest\nfrom _pytest.python import fillfixtures, xunitsetup, Instance, FixtureManager, NOTSET, getfixturemarker, \\\n defaultfuncargprefixmarker, FixtureFunctionMarker, call_fixture_func, getimfunc\nimport py\nimport pytest\nfrom pytest_plugin.mapping_fixture_action import ItemState, fixture_info\nfrom pytest_plugin.rest_erp_client import ErpActions\n\n\n__author__ = 'kiryl_zayets'\n\n\nclass ExtModule(_pytest.python.Module):\n def setup(self):\n setup_module = xunitsetup(self.obj, \"setUpModule\")\n if setup_module is None:\n setup_module = xunitsetup(self.obj, \"setup_module\")\n try:\n if setup_module is not None:\n if inspect.getargspec(setup_module)[0]:\n setup_module(self.obj)\n else:\n setup_module()\n except Exception:\n info = sys.exc_info()\n raise\n # else:\n # item = ItemState(fixture_info(FixtureMapping.BEFORE_TEST))\n # item.name = \"setup_module\"\n # item.activate()\n\n fin = getattr(self.obj, 'tearDownModule', None)\n if fin is None:\n fin = getattr(self.obj, 'teardown_module', None)\n if fin is not None:\n if inspect.getargspec(fin)[0]:\n finalizer = lambda: fin(self.obj)\n else:\n finalizer = fin\n # item.has_teardown = True\n\n self.addfinalizer(finalizer)\n\n\nclass ExtClass(_pytest.python.Class):\n def setup(self):\n setup_class = xunitsetup(self.obj, 'setup_class')\n if setup_class is not None:\n setup_class = getattr(setup_class, 'im_func', setup_class)\n setup_class = getattr(setup_class, '__func__', setup_class)\n setup_class(self.obj)\n # item = ItemState(fixture_info(FixtureMapping.\n # BEFORE_CLASS))\n # item.name = 'setup_class'\n # item.activate(item)\n fin_class = getattr(self.obj, 'teardown_class', None)\n if fin_class is not None:\n fin_class = getattr(fin_class, 'im_func', fin_class)\n fin_class = getattr(fin_class, '__func__', fin_class)\n self.addfinalizer(lambda: fin_class(self.obj))\n # item.has_teardown = True\n\n\nclass ExtFunctionMixin(_pytest.python.FunctionMixin):\n def setup(self):\n \"\"\" perform setup for this test function. 
\"\"\"\n if hasattr(self, '_preservedparent'):\n obj = self._preservedparent\n elif isinstance(self.parent, Instance):\n obj = self.parent.newinstance()\n self.obj = self._getobj()\n else:\n obj = self.parent.obj\n if inspect.ismethod(self.obj):\n setup_name = 'setup_method'\n teardown_name = 'teardown_method'\n # item = ItemState(fixture_info(FixtureMapping.BEFORE_METHOD))\n # item.name = 'setup_method'\n else:\n setup_name = 'setup_function'\n teardown_name = 'teardown_function'\n # item = ItemState(fixture_info(FixtureMapping.BEFORE_FUNCTION))\n # item.name = 'setup_function'\n setup_func_or_method = xunitsetup(obj, setup_name)\n if setup_func_or_method is not None:\n setup_func_or_method(self.obj)\n # item.activate(item)\n fin = getattr(obj, teardown_name, None)\n if fin is not None:\n self.addfinalizer(lambda: fin(self.obj))\n # item.has_teardown = True\n\n\nclass ExtFunction(_pytest.python.Function):\n def __init__(self, wrappee):\n self.wrappee = wrappee\n\n def __getattr__(self, item):\n return getattr(self.wrappee, item)\n\n def __setattr__(self, key, value):\n if key is 'wrappee':\n self.__dict__[key] = value\n else:\n self.__dict__['wrappee'].__dict__[key] = value\n\n def setup(self):\n try:\n self.callspec._emptyparamspecified\n except AttributeError:\n pass\n else:\n fs, lineno = self._getfslineno()\n pytest.skip(\"got empty parameter set, function %s at %s:%d\" % (\n self.function.__name__, fs, lineno))\n ExtFunctionMixin.setup(self)\n try:\n request = self._request\n self._request._pyfuncitem = self._pyfuncitem\n except:\n fillfixtures(self)\n else:\n item = request._pyfuncitem\n fixturenames = getattr(item, \"fixturenames\", request.fixturenames)\n for argname in fixturenames:\n if argname not in item.funcargs:\n item.funcargs[argname] = request.getfuncargvalue(argname)\n\n\nclass ExtSetupState(_pytest.runner.SetupState):\n def _callfinalizers(self, colitem):\n finalizers = self._finalizers.pop(colitem, None)\n exc = None\n while finalizers:\n fin = finalizers.pop()\n try:\n fin()\n # if str(fin).find('ExtFunctionMixin.setup') > 0:\n # if inspect.ismethod(colitem.obj):\n # ItemState.deactivate('setup_method')\n # else:\n # ItemState.deactivate('setup_function')\n # elif str(fin).find('ExtModule.setup') > 0:\n # ItemState.deactivate('setup_module')\n # elif str(fin).find('ExtClass.setup') > 0:\n # ItemState.deactivate('setup_class')\n except Exception:\n if exc is None:\n exc = sys.exc_info()\n if exc:\n py.builtin._reraise(*exc)\n\n\nclass ExtFixtureManager(FixtureManager):\n def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):\n if nodeid is not NOTSET:\n holderobj = node_or_obj\n else:\n holderobj = node_or_obj.obj\n nodeid = node_or_obj.nodeid\n if holderobj in self._holderobjseen:\n return\n self._holderobjseen.add(holderobj)\n autousenames = []\n for name in dir(holderobj):\n obj = getattr(holderobj, name, None)\n if not callable(obj):\n continue\n # fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)\n # or are \"@pytest.fixture\" marked\n marker = getfixturemarker(obj)\n if marker is None:\n if not name.startswith(self._argprefix):\n continue\n marker = defaultfuncargprefixmarker\n name = name[len(self._argprefix):]\n elif not isinstance(marker, FixtureFunctionMarker):\n # magic globals with __getattr__ might have got us a wrong\n # fixture attribute\n continue\n else:\n assert not name.startswith(self._argprefix)\n fixturedef = ExtFixtureDef(self, nodeid, name, obj,\n marker.scope, marker.params,\n yieldctx=marker.yieldctx,\n 
unittest=unittest, ids=marker.ids)\n faclist = self._arg2fixturedefs.setdefault(name, [])\n if fixturedef.has_location:\n faclist.append(fixturedef)\n else:\n # fixturedefs with no location are at the front\n # so this inserts the current fixturedef after the\n # existing fixturedefs from external plugins but\n # before the fixturedefs provided in conftests.\n i = len([f for f in faclist if not f.has_location])\n faclist.insert(i, fixturedef)\n if marker.autouse:\n autousenames.append(name)\n if autousenames:\n self._nodeid_and_autousenames.append((nodeid or '', autousenames))\n\n\nclass ExtFixtureDef(_pytest.python.FixtureDef):\n\n def execute(self, request):\n # get required arguments and register our own finish()\n # with their finalization\n kwargs = {}\n for argname in self.argnames:\n fixturedef = request._get_active_fixturedef(argname)\n result, arg_cache_key = fixturedef.cached_result\n kwargs[argname] = result\n if argname != \"request\":\n fixturedef.addfinalizer(self.finish)\n\n my_cache_key = request.param_index\n cached_result = getattr(self, \"cached_result\", None)\n if cached_result is not None:\n # print argname, \"Found cached_result\", cached_result\n # print argname, \"param_index\", param_index\n result, cache_key = cached_result\n if my_cache_key == cache_key:\n # print request.fixturename, \"CACHE HIT\", repr(my_cache_key)\n return result\n # print request.fixturename, \"CACHE MISS\"\n # we have a previous but differently parametrized fixture instance\n # so we need to tear it down before creating a new one\n self.finish()\n assert not hasattr(self, \"cached_result\")\n\n if self.unittest:\n result = self.func(request.instance, **kwargs)\n else:\n fixturefunc = self.func\n # the fixture function needs to be bound to the actual\n # request.instance so that code working with \"self\" behaves\n # as expected.\n if request.instance is not None:\n fixturefunc = getimfunc(self.func)\n if fixturefunc != self.func:\n fixturefunc = fixturefunc.__get__(request.instance)\n\n old_std, old_err = sys.stdout, sys.stderr\n new_one_info, new_one_error = StringIO(), StringIO()\n sys.stdout, sys.stderr = new_one_info, new_one_error\n\n _fixture = fixture_info(self.scope, ErpActions.fixture, None, fixturefunc._pytestfixturefunction.autouse)\n item = ItemState(self.argname, fixturefunc.__doc__, fixture_mapping=_fixture).activate()\n ItemState.current_fixture = item\n result = call_fixture_func(fixturefunc, request, kwargs,\n self.yieldctx)\n # _fixture(result)\n\n info = new_one_info.getvalue()\n err = new_one_error.getvalue()\n sys.stdout, sys.stderr = old_std, old_err\n item.stdout, item.stderr = info, err\n # item = ItemState(self.argname, fixturefunc.__doc__, fixture_mapping=_fixture, out=info, err=err).activate()\n ErpActions.log(item)\n item.here_and_now_deactivate()\n self.cached_result = (result, my_cache_key)\n return result\n\n\n def finish(self):\n \"\"\"\n Finale for fixtures\n \"\"\"\n while self._finalizer:\n func = self._finalizer.pop()\n old_std, old_err = sys.stdout, sys.stderr\n new_one_info, new_one_error = StringIO(), StringIO()\n sys.stdout, sys.stderr = new_one_info, new_one_error\n func()\n info = new_one_info.getvalue()\n err = new_one_error.getvalue()\n sys.stdout, sys.stderr = old_std, old_err\n if not type(func) is MethodType:\n _fixture = fixture_info(scope=self.scope, action=ErpActions.start_teardown_fixture)\n item = ItemState(self.argname,func.__doc__, fixture_mapping=_fixture, out=info, err=err).activate()\n ErpActions.log(item)\n 
item.here_and_now_deactivate()\n try:\n del self.cached_result\n except AttributeError:\n pass","repo_name":"kiryl-zaytes/python-and-I","sub_path":"report_portal/pytest_plugin/ext_fixtures_behaviour.py","file_name":"ext_fixtures_behaviour.py","file_ext":"py","file_size_in_byte":11657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"23773463113","text":"from terminusdb_client import Client\nfrom terminusdb_client.woqlschema import WOQLSchema\n\n# For Terminus X, use the following\n# client = Client(\"https://cloud.terminusdb.com//\")\n# client.connect(db=\"demo_workshop\", team=\"\", use_token=True)\n\nclient = Client(\"http://127.0.0.1:6363/\")\nclient.connect(db=\"getting_started\")\n\ndata_schema = WOQLSchema()\ndata_schema.from_db(client)\n\n# Update a document\n\ndestiny_raw = client.get_document(\"Employee/001\")\ndestiny = data_schema.import_objects(destiny_raw)\n\ndestiny.address.postcode = \"PH12 3RP\"\ndestiny.address.street = \"Lairg Road\"\ndestiny.address.street_num = 73\ndestiny.address.town = \"Newbigging\"\n\nclient.update_document(destiny, commit_msg=\"Update Destiny\")\n\n# Linking a new document to an old document\n\nEmployee = data_schema.object.get(\"Employee\")\nAddress = data_schema.object.get(\"Address\")\nTeam = data_schema.object.get(\"Team\")\n\nethan_address = Address(\n postcode=\"IV27 2TG\", street=\"Shore Street\", street_num=84, town=\"Stoer\"\n)\n\nmanager_raw = client.get_document(\"Employee/004\")\nethan_manager = data_schema.import_objects(manager_raw)\n\nethan = Employee(\n _id=\"Employee/005\",\n name=\"Ethan Abbott\",\n title=\"Backend Developer\",\n team=Team.it,\n contact_number=\"070 7796 8035\",\n address=ethan_address,\n manager=ethan_manager,\n)\n\nclient.update_document(ethan, commit_msg=\"Adding Ethan\")\n","repo_name":"terminusdb/terminusdb-tutorials","sub_path":"getting_started/python-client/update_data.py","file_name":"update_data.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"67"}
+{"seq_id":"3493337841","text":"from itertools import combinations\nfrom math import gcd\n\ndef load_input(source):\n data = []\n with open(source) as file:\n for line in file:\n data.append(line.strip())\n if len(data) == 1:\n data = data[0]\n return data\n\nclass Moon:\n def __init__(self, position=(0,0,0), velocity=(0,0,0)):\n self.x = position[0]\n self.y = position[1]\n self.z = position[2]\n self.vx = velocity[0]\n self.vy = velocity[1]\n self.vz = velocity[2]\n\n def __repr__(self):\n return f\"pos=, vel=\"\n\n def update_position_by_velocity(self):\n self.x += self.vx\n self.y += self.vy\n self.z += self.vz\n\n def get_potential_energy(self):\n return abs(self.x) + abs(self.y) + abs(self.z)\n\n def get_kinetic_energy(self):\n return abs(self.vx) + abs(self.vy) + abs(self.vz)\n\n def get_total_energy(self):\n return self.get_kinetic_energy() * self.get_potential_energy()\n\n def apply_gravity(self, other):\n if self == other:\n return\n if self.x != other.x:\n if self.x > other.x:\n self.vx -= 1\n else:\n self.vx += 1\n if self.y != other.y:\n if self.y > other.y:\n self.vy -= 1\n else:\n self.vy += 1\n if self.z != other.z:\n if self.z > other.z:\n self.vz -= 1\n else:\n self.vz += 1\n\n def get_pos(self):\n return (self.x, self.y, self.z)\n\n def get_vel(self):\n return (self.vx, self.vy, self.vz)\n\ndef create_moons(coordinates):\n moons = []\n for line in coordinates:\n data = [int(x.split(\"=\")[1]) for x in line[1:-1].split(\",\")]\n moons.append(Moon(data))\n return moons\n\ndef apply_gravity(moons):\n for x, y in combinations(moons, 2):\n x.apply_gravity(y)\n y.apply_gravity(x)\n\ndef first_star(data):\n moons = create_moons(data)\n for i in range(1000): \n apply_gravity(moons)\n for moon in moons:\n moon.update_position_by_velocity()\n return sum([x.get_total_energy() for x in moons])\n\ndef find_lcm(arr):\n\n # Assume it's the first number\n lcm = arr[0]\n\n # Once for every number in the array\n for i in arr[1:]:\n\n # Calculate the lcm\n # lcm = num*other_num/GCD(num,other_num)\n lcm = int(lcm * i / gcd(lcm, i))\n\n return lcm\n\ndef solve_axis(data, index):\n moons = create_moons(data)\n step = 0\n while True:\n step += 1\n apply_gravity(moons)\n for i in range(len(moons)):\n moons[i].update_position_by_velocity()\n if all([x.get_vel()[index] == 0 for x in moons]):\n return step\n\ndef second_star(data):\n x = solve_axis(data, 0)\n y = solve_axis(data, 1)\n z = solve_axis(data, 2)\n lcm = find_lcm((x,y,z))\n return lcm * 2\n\ndef solution(source):\n data = load_input(source)\n print(\"Day 12\")\n print(\"First star:\", str(first_star(data)))\n print(\"Second star:\", str(second_star(data)))\n print(\"-------------------------------------\")\n\nif __name__ == \"__main__\":\n solution(\"input.txt\")\n \n","repo_name":"Peritract/adventofcode","sub_path":"2019/day_12.py","file_name":"day_12.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"2133225415","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport pandas as pd\nimport numpy as np\n\n\n# In[2]:\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# In[3]:\n\nget_ipython().magic('matplotlib inline')\n\n\n# In[4]:\n\ntrain = pd.read_csv('advertising.csv')\n\n\n# In[5]:\n\ntrain.head()\n\n\n# In[6]:\n\nad_data = pd.read_csv(\"advertising.csv\")\n\n\n# In[7]:\n\nad_data.head()\n\n\n# In[8]:\n\nad_data.describe()\n\n\n# In[9]:\n\nad_data['Age'].hist()\n\n\n# In[10]:\n\nsns.jointplot(x='Area Income', y='Age', data=ad_data)\n\n\n# In[11]:\n\nsns.jointplot(x='Daily Time Spent on Site', y='Age',kind=kde,data=ad_data)\n\n\n# In[12]:\n\nsns.jointplot(x='Daily Time Spent on Site', y='Age',kind='kde',data=ad_data)\n\n\n# In[13]:\n\nsns.jointplot(x='Daily Time Spent on Site', y='Daily Internet Usage',data=ad_data)\n\n\n# In[14]:\n\nsns.pairplot(data=ad_data,hue='Clicked on Ad')\n\n\n# In[15]:\n\nad_data.drop('Add Topic Line',axis=1,inplace=True)\n\n\n# In[16]:\n\nad_data.columns\n\n\n# In[17]:\n\nad_data.head()\n\n\n# In[18]:\n\nad_data.drop(['Ad Topic Line','City','Country','Timestamp'], axis=1,inplace=True)\n\n\n# In[19]:\n\nad_data.head()\n\n\n# In[20]:\n\nX = ad_data.drop('Age',axis=1)\ny = ad_data['Age'] #is the column your trying to predict, the label to be applied\n\n\n# In[21]:\n\nfrom sklearn.cross_validation import train_test_split\n\n\n# In[22]:\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)\n\n\n# In[23]:\n\nfrom sklearn.linear_model import LogisticRegression\n\n\n# In[24]:\n\nlogmodel = LogisticRegression()\n\n\n# In[25]:\n\nlogmodel.fit(X_train,y_train)\n\n\n# In[26]:\n\npredictions = logmodel.predict(X_test)\n\n\n# In[27]:\n\nfrom sklearn.metrics import classification_report\n\n\n# In[28]:\n\nprint(classification_report(y_test, predictions))\n\n\n# In[29]:\n\nfrom sklearn.metrics import confusion_matrix\n\n\n# In[30]:\n\nconfusion_matrix(y_test,predictions)\n\n\n# In[31]:\n\nX = ad_data.drop('Male',axis=1)\ny = ad_data['Male'] #is the column your trying to predict, the label to be applied\n\n\n# In[32]:\n\nfrom sklearn.cross_validation import train_test_split\n\n\n# In[33]:\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)\n\n\n# In[34]:\n\nfrom sklearn.linear_model import LogisticRegression\n\n\n# In[35]:\n\nlogmodel = LogisticRegression()\n\n\n# In[36]:\n\nlogmodel.fit(X_train,y_train)\n\n\n# In[37]:\n\npredictions = logmodel.predict(X_test)\n\n\n# In[38]:\n\nfrom sklearn.metrics import classification_report\n\n\n# In[39]:\n\nprint(classification_report(y_test, predictions))\n\n\n# In[40]:\n\nfrom sklearn.metrics import confusion_matrix\n\n\n# In[41]:\n\nconfusion_matrix(y_test,predictions)\n\n\n# In[ ]:\n\n\n\n","repo_name":"autonomous019/Artificial-Intelligence-Research","sub_path":"logistic-regression-diff-parameters (1).py","file_name":"logistic-regression-diff-parameters (1).py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"71741432534","text":"n = int(input())\ndp = [False, False] + [True] * (n - 1)\nprimes = [0]\n\nfor i in range(1, n + 1):\n if dp[i]:\n primes.append(i)\n j = 2\n while i * j <= n:\n dp[i * j] = False\n j += 1\n\nfor i in range(1, len(primes)):\n primes[i] = primes[i - 1] + primes[i]\n\nleft, right = 0, 1\ncnt = 0\n\nwhile right < len(primes):\n _sum = primes[right] - primes[left]\n if _sum == n:\n cnt += 1\n left += 1\n right += 1\n elif _sum < n:\n right += 1\n elif _sum > n:\n left += 1\n\nprint(cnt)","repo_name":"devyuseon/problem-solving","sub_path":"boj/1644.py","file_name":"1644.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"12680502980","text":"import json\nfrom typing import List\n\nindexpath = 'index_rotation.json'\n\n\ndef index_rotation(index: dict):\n print('\\nGenerating rotation index')\n words = index.keys()\n\n data = {}\n for word in words:\n for rotation in rotations(word):\n data[rotation] = word\n\n with open(indexpath, 'w+') as index_file:\n json.dump(data, index_file)\n\n print('\\nFinished generating rotation index')\n\n\ndef rotations(word: str) -> List[str]:\n w = f'{word}$'\n result = [w]\n for i in range(0, len(w)):\n w = f'{w[1:]}{w[0]}'\n result.append(w)\n return result\n","repo_name":"amukhopad/nlp-infosearch","sub_path":"pysearch/index/rotation.py","file_name":"rotation.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"40601185888","text":"import unittest\nfrom unittest import mock\n\nimport numpy as np\nimport pandas as pd\nfrom tensorflow import keras\n\nfrom ryan_adams.ryan_adams import RyanAdams\n\n\nclass TestRyanAdams(unittest.TestCase):\n @unittest.skip('need to implement')\n def test_keras_compatability_layer_tracking(self):\n assert False\n\n @mock.patch('ryan_adams.RyanAdams.__init__')\n def test__build_component_layers(self, mock___init__):\n t = object()\n id = object()\n mock___init__.return_value = None\n mock_trend1, mock_trend2 = mock.Mock(n_items=1), mock.Mock(n_items=2)\n ryan_adams = RyanAdams()\n ryan_adams._base_inputs = {'t': t, 'id': id}\n\n ryan_adams._build_component_layers([mock_trend1, mock_trend2])\n mock_trend1.assert_called_with([t, None])\n mock_trend2.assert_called_with([t, id])\n\n def test__build_component_layer_n_items_greater_than_1(self):\n t = object()\n id = object()\n mock_trend = mock.Mock(n_items=2)\n mock_ryan_adams = mock.Mock(_base_inputs={'t': t, 'id': id})\n\n RyanAdams._build_component_layer(mock_ryan_adams, mock_trend)\n mock_trend.assert_called_with([t, id])\n\n def test__build_component_layer_n_items_equal_1(self):\n t = object()\n id = object()\n mock_trend = mock.Mock(n_items=1)\n mock_ryan_adams = mock.Mock(_base_inputs={'t': t, 'id': id})\n\n RyanAdams._build_component_layer(mock_ryan_adams, mock_trend)\n mock_trend.assert_called_with([t, None])\n\n def test__build_component_layer_n_items_greater_than_1(self):\n t = object()\n id = object()\n mock_trend = mock.Mock(n_items=2)\n mock_ryan_adams = mock.Mock(_base_inputs={'t': t, 'id': id})\n\n RyanAdams._build_component_layer(mock_ryan_adams, mock_trend)\n mock_trend.assert_called_with([t, id])\n\n def test__build_prophet_layer(self):\n t = object()\n id = object()\n mock_trend = mock.Mock(n_items=1)\n mock_ryan_adams = mock.Mock(_base_inputs={'t': t, 'id': id})\n\n RyanAdams._build_component_layer(mock_ryan_adams, mock_trend)\n mock_trend.assert_called_with([t, None])\n\n def test__list_if_str_or_none(self):\n actual1 = RyanAdams._list_if_str_or_none('a')\n expected1 = ['a']\n self.assertEqual(actual1, expected1)\n\n actual2 = RyanAdams._list_if_str_or_none(None)\n expected2 = []\n self.assertEqual(actual2, expected2)\n\n actual3 = RyanAdams._list_if_str_or_none([1, 2])\n expected3 = [1, 2]\n self.assertEqual(actual3, expected3)\n\n @mock.patch('ryan_adams.RyanAdams._max_n_items')\n @mock.patch('ryan_adams.RyanAdams._build_model')\n def test__build_inputs_n_items_equal_1(self, mock__build_model, mock__max_n_items):\n mock__max_n_items.return_value = 1\n ryan_adams = RyanAdams()\n base_inputs, feature_inputs = ryan_adams._build_inputs()\n self.assertNotIn('id', ryan_adams._base_inputs)\n\n @mock.patch('ryan_adams.RyanAdams._max_n_items')\n @mock.patch('ryan_adams.RyanAdams._build_model')\n def test__build_inputs_n_items_greater_than_1(self, mock__build_model, mock__max_n_items):\n mock__max_n_items.return_value = 2\n ryan_adams = RyanAdams()\n base_inputs, feature_inputs = ryan_adams._build_inputs()\n self.assertIn('id', ryan_adams._base_inputs)\n\n @mock.patch('ryan_adams.RyanAdams._max_n_items')\n @mock.patch('ryan_adams.RyanAdams._build_model')\n def test__build_inputs_user_inputs(self, mock__build_model, mock__max_n_items):\n mock__max_n_items.return_value = 2\n ryan_adams = RyanAdams(inputs={'t': 1, 'id': -1})\n base_inputs, feature_inputs = ryan_adams._build_inputs()\n actual = ryan_adams._base_inputs\n expected = {'t': 1, 'id': -1}\n self.assertEqual(actual, expected)\n\n def 
test__get_or_create(self):\n sentinel1 = object()\n sentinel2 = object()\n mock1 = mock.Mock()\n mock1.return_value = sentinel1\n mock2 = mock.Mock()\n\n actual1 = RyanAdams._get_or_create({}, 'a', mock1, 1, 2, a=1)\n expected1 = sentinel1\n actual2 = RyanAdams._get_or_create({'a': sentinel2}, 'a', mock1, 1, 2, a=1)\n expected2 = sentinel2\n\n self.assertIs(actual1, expected1)\n self.assertIs(actual2, expected2)\n mock1.assert_called_with(1, 2, a=1)\n mock2.assert_not_called()\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"dantegates/keras-prophet","sub_path":"tests/test_ryan_adams.py","file_name":"test_ryan_adams.py","file_ext":"py","file_size_in_byte":4474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"44190853700","text":"#!/usr/bin/env python3\n#Author: Jack Erickson \n#Week7 Interacting with a Website Assignment\n\nimport requests\n\nresponse = requests.get(\"https://notpurple.com\")\n\nwith open(\"my_web_file.html\", \"w\") as hFile:\n hFile.write(response.text)\n\n","repo_name":"jerickson13-matc/Python-Scripting","sub_path":"Week7/copy-web.py","file_name":"copy-web.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"17874774689","text":"import os\nimport cv2\n\noutput_dir = r'/data/yuzun/dataset/ADNI1_296sub/train_flip'\n# 读取图片\nimage_dir = r'/data/yuzun/dataset/ADNI1_296sub/train'\n\nfor img in os.listdir(image_dir):\n image_path = os.path.join(image_dir,img)\n image = cv2.imread(image_path)\n\n # 逆时针90°\n rotate_90_cv = cv2.rotate(image, cv2.ROTATE_90_COUNTERCLOCKWISE)\n\n # 镜像翻转\n xImg = cv2.flip(rotate_90_cv,1,dst=None) #水平镜像\n # xImg1 = cv2.flip(rotate_90_cv,0,dst=None) #垂直镜像\n # xImg2 = cv2.flip(img,-1,dst=None) #对角镜像\n\n output_path = os.path.join(output_dir,img)\n cv2.imwrite(output_path,xImg)\n print(image_path)","repo_name":"DejaVuyan/python","sub_path":"image/image_rotate.py","file_name":"image_rotate.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"39339161492","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# class NanglenPipeline(object):\n# def process_item(self, item, spider):\n# return item\n\nimport pymongo\nimport datetime\n\nclass MoviePipeline(object):\n\n collection_name = 'movies'\n today = datetime.datetime.now()\n current_year = str(today.year)\n if today.month < 10:\n current_month = '0' + str(today.month)\n else:\n current_month = str(today.month)\n\n if today.day < 10:\n current_day = '0' + str(today.day)\n else:\n current_day = str(today.day)\n \n current_date = current_year + current_month + current_day\n\n def __init__(self, mongo_server, mongo_port, mongo_db):\n self.mongo_server = mongo_server\n self.mongo_port = mongo_port\n self.mongo_db = mongo_db\n self.mongo_uri = mongo_server + \":\" + str(mongo_port)\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(\n mongo_server=crawler.settings.get('MONGO_SERVER'),\n mongo_port=crawler.settings.get('MONGO_PORT'),\n mongo_db=crawler.settings.get('MONGO_DB', 'items')\n )\n\n def open_spider(self, spider):\n self.client = pymongo.MongoClient(self.mongo_uri)\n self.db = self.client[self.mongo_db]\n\n def close_spider(self, spider):\n self.client.close()\n\n def process_item(self, item, spider):\n movie_collection = self.db[self.collection_name]\n found_movie = movie_collection.find_one({'Title': item['Title'], 'Year': item['Year']})\n item['LastOnAir'] = self.current_date\n if found_movie is None:\n movie_collection.insert_one(dict(item))\n return item\n else:\n movie_collection.find_one_and_update({'_id': found_movie['_id']}, {\n '$set': {'LastOnAir': self.current_date}\n })\n return item\n","repo_name":"ceruberu/nanglen_scrapy","sub_path":"nanglen/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"4826889199","text":"from django.test import TestCase\nimport json\n\n# Create your tests here.\n\n\nclass UrlTest(TestCase):\n def test_endpoit_get_request(self):\n response = self.client.get(\"/candidates\")\n self.assertEqual(response.status_code, 200)\n\n def test_endpoit_post_request(self):\n data = {\n \"name\": \"Txip\",\n \"workExperience\": [\n {\"start\": \"Jan 1998\", \"end\": \"Apr 2005\"},\n {\"start\": \"Jan 2005\", \"end\": \"Apr 2013\"},\n {\"start\": \"Feb 2015\", \"end\": \"May 2016\"},\n ],\n }\n response = self.client.post(\n \"/candidates\", json.dumps(data), content_type=\"aplication/json\"\n )\n self.assertEqual(response.status_code, 200)\n","repo_name":"dfkthsq/experience-counter","sub_path":"candidates/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"18573900309","text":"n = int(input(\"Enter a number :\"))\nm = 1\nsums = 0\nc = 1\nother_sum = 0\nd = 0\nwhile (n % (10**(m-1))) != n:\n ad = n % (10 ** m)\n if ad > 9:\n ad = ad // (10 ** d)\n sums += ad\n m += 1\n d += 1\nif sums >= 20:\n while sums % (10**(c-1)) != sums:\n ad = sums % (10 ** c)\n if ad > 9:\n ad //= (10 ** (c-1))\n other_sum += ad\n c += 1 \nif other_sum == 3 or sums < 20 and sums % 3 == 0:\n print(n ,\"is divisible by 3\")\nelse:\n print(n,\"is not divisible by 3\")","repo_name":"215026717/PythonFirstYear","sub_path":"Divisiblity.py","file_name":"Divisiblity.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"42597133540","text":"import os\n\nimport pytest\n\nfrom Component import Component\nfrom render import draw_svg_component\nfrom svg_testing import svg_file_validation\n\n\nclass TestSVGCard:\n def test_draw_svg_card_save(self):\n c = Component(\n name='TestComponent',\n level=1,\n image='firewall.png',\n throughput=1,\n latency=1,\n cost=1,\n base=0,\n colour='#f38181'\n )\n\n draw_svg_component(c)\n\n assert os.path.exists('output/test_component-L1.svg')\n\n a, e = svg_file_validation('output/test_component-L1.svg', 'tests/fixtures/component.fixture.svg')\n assert a == e\n","repo_name":"JacobAGTyler/ArchitectureGame","sub_path":"tests/test_render.py","file_name":"test_render.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"27409334366","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport correlatefitter as cf\nimport time\nfrom inspect import signature\nimport numpy.linalg as lin\nimport random\nfrom pathlib import Path\n\nmaxlen = 50\ncorperconfig = 20\nbinsize = 20\nrpm = False\nstart=1\nend=5\n\ndef fittedfunc(x,a,m,b):\n\treturn a - m * x - b*np.log(x)\n\ndef fittedfunc2(x,a,m,b):\n\treturn a*np.exp(-m*x)/(x**b)\n\ndef bindingenergy(x,a1,m,b1,a2,M,b2):\n\treturn (2*a1-a2-2*m*x+M*x+(2*b1-b2)*np.log(x))/x\n\nsig = signature(fittedfunc2)\nnumpara = len(sig.parameters) - 1\n\nm=0.001\ntotalcor1,allpara1,parastderr1,allchisq1 = cf.fits(fittedfunc,Path('./correlatordata/allcorrelators_m0=%f.npy'%m),maxlen,corperconfig,binsize,m,start,end,numpara,'log',0,rpm)\nL = len(allpara1[1,:])\n\n# E= np.zeros(L)\n# for bmass in np.arange(0.001,0.051,0.001):\n# \tm,M,E[int(bmass*1000-1)] = smearefit(fittedfunc,Path('./correlatordata/allcorrelators_m0=%f.npy'%bmass),maxlen,corperconfig,binsize,bmass,start,end,numpara,'log',1)\n\n\n\nonemass = np.zeros((50,L))\ntwomass = np.zeros((50,L))\nfor bmass in np.arange(0.001,0.051,0.001):\n# totalcor1,finalpara1,parastderr,finalchisq = cf.fits(fittedfunc,Path('testt.npy'),maxlen,corperconfig,binsize,bmass,start,end,numpara,'log')\n\ttotalcor1,allpara1,parastderr1,allchisq1 = cf.fits(fittedfunc,Path('./correlatordata/allcorrelators_m0=%f.npy'%bmass),maxlen,corperconfig,binsize,bmass,start,end,numpara,'log',0,rpm)\n\tonemass[int(1000*bmass-1),:]=allpara1[1,:]\n\ttotalcor2,allpara2,parastderr2,allchisq2 = cf.fits(fittedfunc,Path('./correlatordata/alltwoparticlecorrelators_m0=%f.npy'%bmass),maxlen,corperconfig,binsize,bmass,start,end,numpara,'log',0,rpm)\n\ttwomass[int(1000*bmass-1),:]=allpara2[1,:]\n\nnp.save(Path('./mass/onemass_4b0_%d-%d'%(start,end)),onemass)\nnp.save(Path('./mass/twomass_4b0_%d-%d'%(start,end)),twomass)\n\n\n\n\n\n# print('chi:',finalchisq1)\n# totalcor,finalpara,parastderr,finalchisq = cf.fits(fittedfunc2,Path('./correlatordata/allcorrelators_m0=%f.npy'%bmass),maxlen,corperconfig,binsize,bmass,start,end,numpara,'exp')\n# m=finalpara1[1] \n# totalcor,finalpara2,parastderr,finalchisq = cf.fits(fittedfunc2,Path('./correlatordata/alltwoparticlecorrelators_m0=%f.npy'%bmass),maxlen,corperconfig,binsize,bmass,start,end,numpara,'exp')\n# M=finalpara2[1]\n\n\nexit()\nnonzero1 = 10\nfig1 = plt.figure()\n#Plot the correlator with error bars\nx = np.arange(1,nonzero1)\n#y = fittedfunc2(x,finalpara[0],finalpara[1],finalpara[2])\ny = np.exp(fittedfunc(x,finalpara1[0],finalpara1[1],finalpara1[2]))\ny1 = np.mean(totalcor1[1:nonzero1],axis = 1)\nerr = np.std(totalcor1[1:nonzero1],axis = 1)/np.sqrt(len(totalcor1[0,:])-1)\n#err1 = err[0:nonzero]\nplt.errorbar(x,y1,yerr = err, ecolor = 'red',linewidth = 1, capsize = 1)\n#plt.yscale('log')\nplt.plot(x,y,'red')\nplt.grid(True)\nplt.xlabel('Geodesic Distance')\nplt.ylabel('Correlator')\nplt.title('start %d, end %d'%(start,end))\nplt.show()\n\n\nexit()\nplotlenth = 33\nplotdata(totalcortmp1,plotlenth)\nplotfit(finalpara,form,plotlenth)","repo_name":"Hogwarts23/Lattice-Quantum-Gravity-2","sub_path":"main3.py","file_name":"main3.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"70777921493","text":"\n##############################\n# Files\n##############################\n\nimport os\n\n##############################\n# Activity 3 - Images\n##############################\n\n\n## Question 1 ##\n\ndef write_image_bw():\n\n # Create a file to write\n filename = \"image_bw.pbm\"\n fi = open(filename,\"w\")\n\n # Header\n fi.write(\"P1\\n\") # Black and white image\n nb_col = 300\n nb_lin = 200\n fi.write(str(nb_col) + \" \" + str(nb_lin) + \"\\n\")\n\n\n for i in range(nb_lin):\n line = \"\"\n for j in range(nb_col):\n col = (i+j)//10 % 2\n line = line + str(col) + \" \"\n line = line + \"\\n\"\n\n # Write the line\n fi.write(line)\n\n # Close file\n fi.close()\n\n return\n# Test\n\nprint(\"--- File 'image_bw.pbm' ---\")\nwrite_image_bw()\n\n\n## Question 2 ##\n\ndef write_image_gray():\n\n # Create a file to write\n filename = \"image_gray.pgm\"\n fi = open(filename,\"w\")\n\n # Header\n fi.write(\"P2\\n\") # Grayscale image\n nb_col = 200\n nb_lin = 200\n fi.write(str(nb_col) + \" \" + str(nb_lin) + \"\\n\")\n levels = 255\n fi.write(str(levels) + \"\\n\") \n\n for i in range(nb_lin):\n line = \"\"\n for j in range(nb_col):\n col = (i**2 + j**2) % 256 # a level of gray: a function of i and j\n line = line + str(col) + \" \" \n line = line + \"\\n\"\n\n # Write line\n fi.write(line)\n\n # Close file\n fi.close()\n\n return\n\n# Test\n\nprint(\"--- File 'image_gray.pgm' ---\")\nwrite_image_gray()\n\n\n## Question 3 ##\n\ndef ecrire_fichier_image_col():\n\n # Create a file to write\n filename = \"image_col.ppm\"\n fic = open(filename,\"w\")\n\n # Header\n fic.write(\"P3\\n\") # Color image\n nb_col = 200\n nb_lin = 200\n fic.write(str(nb_col) + \" \" + str(nb_lin) + \"\\n\")\n levels = 255\n fic.write(str(levels) + \"\\n\") \n\n for i in range(nb_lin):\n line = \"\"\n for j in range(nb_col):\n R = (i*j) % 256 # red level\n G = i % 256 # green level\n B = (i+j)//3 % 256 # blue level\n\n line = line + str(R) + \" \" + str(G) + \" \" + str(B) + \" \" \n line = line + \"\\n\"\n\n # Write line\n fic.write(line)\n\n # Close file\n fic.close()\n\n return\n\n# Test\n\nprint(\"--- File 'image_col.ppm' ---\")\necrire_fichier_image_col()\n\n\n\n\n## Question 4 ##\n\ndef inverse_black_white(filename):\n\n # Input file\n fi_in = open(filename,\"r\")\n\n # Output file\n name, extension = os.path.splitext(filename)\n new_name = name + \"_inverse\" + extension \n fi_out = open(new_name,\"w\")\n\n\n i = 0 # Line number\n for line in fi_in:\n\n if i<2: # Keep first 2 lines\n fi_out.write(line) \n else:\n mylist = line.split()\n new_line = \"\"\n for l in mylist:\n if l == \"1\":\n new_line = new_line + \"0 \"\n else:\n new_line = new_line + \"1 \"\n\n new_line = new_line + \"\\n\"\n fi_out.write(new_line)\n\n i = i + 1\n\n # Close all files\n fi_in.close()\n fi_out.close()\n return\n\nprint(\"--- Inverse black and white ---\")\ninverse_black_white(\"simple_bw.pbm\")\n\n\n## Question 4 ##\n\ndef formula_color_to_gray(R,G,B):\n gray = round(0.21*R + 0.72*G + 0.07*R)\n return gray\n\ndef color_to_gray(filename):\n\n # Input file\n fi_in = open(filename,\"r\")\n\n # Output\n name, extension = os.path.splitext(filename)\n new_name = name + \"_gray\" + \".pgm\"\n fi_out = open(new_name,\"w\")\n\n\n i = 0 # Line number\n for line in fi_in:\n if i == 0:\n fi_out.write(\"P2\\n\") # Grayscale image\n elif i == 1 or i == 2: # Keep line 2 and 3\n fi_out.write(line) \n else:\n mylist = line.split()\n new_line = \"\"\n\n j = 0 # Column number\n while j < len(mylist):\n R = 
int(mylist[j])\n G = int(mylist[j+1])\n B = int(mylist[j+2])\n gray = formula_color_to_gray(R,G,B)\n new_line = new_line + str(gray) + \" \"\n \n j = j + 3\n\n new_line = new_line + \"\\n\"\n fi_out.write(new_line)\n\n i = i + 1\n\n # Close all files\n fi_in.close()\n fi_out.close()\n return\n\nprint(\"--- Color to grayscale ---\")\ncolor_to_gray(\"image_col.ppm\")\n\n\n","repo_name":"exo7math/python1-en-exo7","sub_path":"files/files_3.py","file_name":"files_3.py","file_ext":"py","file_size_in_byte":4364,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"67"}
+{"seq_id":"41800126961","text":"import numpy\n\nfrom . import FairseqLRScheduler, register_lr_scheduler\n\n\n@register_lr_scheduler('noam')\nclass NoamSchedule(FairseqLRScheduler):\n \"\"\"Decay the LR based on the inverse square root of the update number.\n\n We also support a warmup phase where we linearly increase the learning rate\n from some initial learning rate (`--warmup-init-lr`) until the configured\n learning rate (`--lr`). Thereafter we decay proportional to the number of\n updates, with a decay factor set to align with the configured learning rate.\n\n During warmup:\n\n lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_steps)\n lr = lrs[update_num]\n\n After warmup:\n\n lr = decay_factor / sqrt(update_num)\n\n where\n\n decay_factor = args.lr * sqrt(args.warmup_steps)\n \"\"\"\n\n def __init__(self, args, optimizer):\n super().__init__(args, optimizer)\n if len(args.lr) > 1:\n raise ValueError(\n 'Cannot use a fixed learning rate schedule with inverse_sqrt.'\n ' Consider --lr-scheduler=fixed instead.'\n )\n self.warmup_steps = args.warmup_steps\n self.model_size = args.model_size\n # initial learning rate\n self.original_lr = args.lr[0]\n self.lr = args.lr[0]\n self.optimizer.set_lr(self.lr)\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add arguments to the parser for this LR scheduler.\"\"\"\n parser.add_argument('--warmup-steps', default=4000, type=int, metavar='N',\n help='warmup the learning rate linearly for the first N updates')\n parser.add_argument('--model-size', default=-1, type=int, metavar='N',\n help='warmup the learning rate linearly for the first N updates')\n\n def step(self, epoch, val_loss=None):\n \"\"\"Update the learning rate at the end of the given epoch.\"\"\"\n super().step(epoch, val_loss)\n # we don't change the learning rate at epoch boundaries\n return self.optimizer.get_lr()\n\n def step_update(self, num_updates):\n \"\"\"Update the learning rate after each update.\"\"\"\n assert self.model_size > 0\n num_updates = max(num_updates, 1)\n decay = numpy.min([numpy.power(num_updates, -0.5),\n numpy.power(self.warmup_steps, -1.5) * num_updates])\n factor = numpy.power(self.model_size, -0.5)\n self.lr = self.original_lr * factor * decay\n self.optimizer.set_lr(self.lr)\n return self.lr\n","repo_name":"DeepLearnXMU/ABDNMT-RNMT","sub_path":"thseq/optim/lr_scheduler/noam_schedule.py","file_name":"noam_schedule.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"67"}
+{"seq_id":"21689603574","text":"import numpy as np\nimport cvxpy as cp\nfrom numpy.lib import utils\nimport torch\nimport scipy.spatial\nimport time\nfrom scipy.stats import wishart\nfrom sklearn import metrics\n\nd = 3\n\ndtype = torch.float\ndevice = torch.device(\"cpu\")\n\ndef MatConvert(x, device, dtype):\n \"\"\"convert the numpy to a torch tensor.\"\"\"\n x = torch.from_numpy(x).to(device, dtype)\n return x\n\ndef Pdist2(x, y):\n \"\"\"compute the paired distance between x and y.\"\"\"\n Pdist = scipy.spatial.distance.cdist(x,y,'sqeuclidean')\n\n # # x_norm = (x ** 2).sum(1).view(-1, 1)\n # # if y is not None:\n # # y_norm = (y ** 2).sum(1).view(1, -1)\n # # else:\n # # y = x\n # # y_norm = x_norm.view(1, -1)\n # # Pdist = x_norm + y_norm - 2.0 * torch.mm(x, torch.transpose(y, 0, 1))\n # Pdist[Pdist<0]=0\n return Pdist\n\ndef kernelwidthPair(x1, x2):\n '''Implementation of the median heuristic. See Gretton 2012\n Pick sigma such that the exponent of exp(- ||x-y|| / (2*sigma2)),\n in other words ||x-y|| / (2*sigma2), equals 1 for the median distance x\n and y of all distances between points from both data sets X and Y.\n '''\n n, nfeatures = x1.shape\n m, mfeatures = x2.shape\n \n k1 = np.sum((x1*x1), 1)\n q = np.tile(k1, (m, 1)).transpose()\n del k1\n \n k2 = np.sum((x2*x2), 1)\n r = np.tile(k2, (n, 1))\n del k2\n \n h= q + r\n del q,r\n \n # The norm\n h = h - 2*np.dot(x1,x2.transpose())\n h = np.array(h, dtype=float)\n \n mdist = np.median([i for i in h.flat if i])\n\n return mdist\n\ndef sampling_sphere(d, n=1):\n x_hist = []\n for i in range(n):\n x = np.random.normal(size = (d,))\n x = x*0.5 / np.linalg.norm(x)\n x_hist.append(x)\n return x_hist\n\ndef sampling_wishart(d, n=1):\n cov_hist = []\n for i in range(n):\n \n cov = np.zeros([3,3])\n for j in range(d):\n x = np.random.normal(size = (d,1))\n cov = cov + x @ x.T\n \n cov_hist.append(cov)\n return cov_hist\n\ndef parameter_generation(d, L):\n # generate target mean vectors and covariance matrices\n # Input:\n # d: size within a single block\n # L: number of blocks\n # Output:\n # mean_mu_hist: list of mean vectors from mu\n # mean_nu_hist: list of mean vectors from nu\n # cov_mu_hist: list of covariance matrics from mu\n # cov_nu_hist: list of covariance matrics from nu\n\n mean_mu_hist = sampling_sphere(d, L)\n cov_mu_hist = sampling_wishart(d, L)\n\n mean_nu_hist = mean_mu_hist.copy()\n x_nu = np.random.normal(size = (d,))\n x_nu = x_nu*0.5 / np.linalg.norm(x_nu)\n mean_nu_hist[0] = x_nu #= mean_nu_hist[0] * 0\n\n\n cov_nu_hist = cov_mu_hist.copy()\n cov_nu = np.zeros([3,3])\n for j in range(d):\n x = np.random.normal(size = (d,1))\n cov_nu = cov_nu + x @ x.T\n cov_nu_hist[0] = cov_nu\n \n return mean_mu_hist, mean_nu_hist, cov_mu_hist, cov_nu_hist\n\ndef data_generation(mean_mu_hist, mean_nu_hist, cov_mu_hist, cov_nu_hist, n):\n # generate data points from mu and nu with sample size n\n L = len(mean_mu_hist)\n d = len(mean_mu_hist[0])\n D = d*L\n\n X = np.zeros([n,0])\n Y = np.zeros([n,0])\n for ell in range(L):\n X_ell = np.random.multivariate_normal(mean_mu_hist[ell], cov_mu_hist[ell], size=n)\n X = np.concatenate((X, X_ell), axis=1)\n \n Y_ell = np.random.multivariate_normal(mean_nu_hist[ell], cov_nu_hist[ell], size=n)\n Y = np.concatenate((Y, Y_ell), axis=1)\n \n return X, 
Y\n","repo_name":"WalterBabyRudin/MMDVar_Selection","sub_path":"utils_revision.py","file_name":"utils_revision.py","file_ext":"py","file_size_in_byte":3554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"16484031176","text":"#Задача№4\n# Задана натуральная степень k. Сформировать случайным образом \n# список коэффициентов (значения от 0 до 100) многочлена и записать\n# в файл многочлен степени k(до 6 степени).*\n\n# *Пример:* \n\n# - k=2 => 2*x² + 4*x + 5 = 0 или x² + 5 = 0 или 10*x² = 0\n\nfrom random import randint\n\nk = int(input('Введите число '))\nlst = []\n\nwhile k >= 0:\n m = randint(0,100)\n\n ch_0 = [m,'x^',k]\n sch = ''.join(map(str,ch_0))\n lst.append(sch)\n str_0 = \" + \".join(map(str,lst))\n \n print(str_0)\n\n if k == 1:\n ch_1 = [' + ',m,'x']\n #print([m,\"x\"]) \n str_1 = \"\".join(map(str,ch_1))\n \n print(str_1)\n \n if k == 0:\n ch_2 = [' + ',m,' = 0']\n str_2 = ''.join(map(str,ch_2))\n print(str_2)\n k-=1\nequation_str = str_0 + str_1 + str_2\n\nprint(equation_str) \ndata = open('polynomial.txt','a')\ndata.writelines(equation_str)\ndata.close()\n","repo_name":"Alexey-GT/Homework_4PY","sub_path":"Task_4.py","file_name":"Task_4.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"30761340675","text":"from __future__ import print_function\n\nimport numpy as np\nimport cv2\n\n\nclass PatchExtractor(object):\n \"\"\"\"OpenCV SIFT wrapper.\"\"\"\n\n def __init__(self, patch_size=32):\n self.patch_size = patch_size\n\n def get_interest_region(self, gray_img, kpts):\n \"\"\"Get the interest region around a keypoint.\n Args:\n gray_img: Grayscale input image.\n kpts: Nx6 keypoint transformation.\n Returns:\n all_patches: An array of patches of Nx32x32.\n \"\"\"\n\n kpt_n = kpts.shape[0]\n H = gray_img.shape[0]\n W = gray_img.shape[1]\n batch_input_grid = []\n all_patches = []\n bs = 30 # limited by OpenCV remap implementation\n for idx in range(kpt_n):\n # construct affine transformation matrix.\n affine_mat = np.zeros((3, 2), dtype=np.float32)\n affine_mat[0, 0] = kpts[idx, 0] * W / 2\n affine_mat[1, 0] = kpts[idx, 1] * W / 2\n affine_mat[2, 0] = kpts[idx, 2] * W / 2 + W / 2\n affine_mat[0, 1] = kpts[idx, 3] * H / 2\n affine_mat[1, 1] = kpts[idx, 4] * H / 2\n affine_mat[2, 1] = kpts[idx, 5] * H / 2 + H / 2\n # get input grid.\n input_grid = np.matmul(self.output_grid, affine_mat)\n input_grid = np.reshape(input_grid, (-1, 1, 2))\n batch_input_grid.append(input_grid)\n\n if len(batch_input_grid) != 0 and len(batch_input_grid) % bs == 0 or idx == kpt_n - 1:\n # sample image pixels.\n batch_input_grid_ = np.concatenate(batch_input_grid, axis=0)\n patches = cv2.remap(gray_img.astype(np.float32), batch_input_grid_,\n None, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)\n patches = np.reshape(patches, (len(batch_input_grid),\n self.patch_size, self.patch_size))\n all_patches.append(patches)\n batch_input_grid = []\n if len(all_patches) != 0:\n all_patches = np.concatenate(all_patches, axis=0)\n else:\n all_patches = None\n return all_patches\n\n def get_patches(self, gray_img, kpts):\n \"\"\"Get all patches around given keypoints.\n Args:\n cv_kpts: A list of keypoints represented as cv2.KeyPoint.\n Return:\n all_patches: (n_kpts, 32, 32) Cropped patches.\n \"\"\"\n\n # generate sampling grids.\n n_pixel = np.square(self.patch_size)\n self.output_grid = np.zeros((n_pixel, 3), dtype=np.float32)\n for i in range(n_pixel):\n self.output_grid[i, 0] = (i % self.patch_size) * 1. / self.patch_size * 2 - 1\n self.output_grid[i, 1] = (i // self.patch_size) * 1. / self.patch_size * 2 - 1\n self.output_grid[i, 2] = 1\n\n all_patches = self.get_interest_region(gray_img, kpts)\n return all_patches\n","repo_name":"lzx551402/GL3D","sub_path":"utils/patch_extractor.py","file_name":"patch_extractor.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","stars":265,"dataset":"github-code","pt":"67"}
+{"seq_id":"15345475149","text":"import yaml\nimport os\nfrom dataclasses import dataclass\n\nimport torch\nfrom beartype import beartype\nfrom beartype.typing import Optional\n\n\n@dataclass\nclass ConfigReward:\n \"\"\"Config parameters for the reward model\n\n Attributes:\n model (str): Model to be used for the reward model\n model_folder (str): Path to the folder where model are stored (used\n to load / store finetuned model)\n device (torch.device): Device to be used for the reward model\n model_head_hidden_size (int): Hidden size of the reward model head\n debug (bool): enable prints for Debugging\n train_dataset_path (Optional[str]): Path to the training dataset.\n Default to None. To be specified only for the reward model trainig.\n validation_dataset_path (Optional[str]): Path to the validation\n dataset. Default to None. To be specified only for the reward\n model trainig.\n batch_size (Optional[int]): Batch size to train the reward model.\n Default to None. To be specified only for the reward model\n trainig.\n epochs (Optional[int]): Number of epochs to train the reward model.\n Default to None. To be specified only for the reward model\n trainig.\n iteration_per_print (Optional[int]): Number of iterations to print\n the training loss. Default to None. To be specified only for the\n reward model trainig.\n lr (Optional[float]): Learning rate for the reward model. Default to\n None. To be specified only for the reward model distillation.\n llm_model (Optional[str]): Model to be used for the language model\n (LLM). Default to None.\n llm_max_tokens (Optional[int]): Max tokens for the LLM. Default to\n None.\n llm_temperature (Optional[float]): Temperature for the LLM. Default\n to None.\n \"\"\"\n\n model: str\n model_folder: str\n device: torch.device\n model_head_hidden_size: int\n debug: bool\n train_dataset_path: Optional[str] = None\n validation_dataset_path: Optional[str] = None\n batch_size: Optional[int] = None\n epochs: Optional[int] = None\n iteration_per_print: Optional[int] = None\n lr: Optional[float] = None\n llm_model: Optional[str] = None\n llm_max_tokens: Optional[int] = None\n llm_temperature: Optional[float] = None\n\n\n@dataclass\nclass ConfigActor:\n \"\"\"Config parameters for models\n\n Attributes:\n model (str): Model to be used for the actor\n model_folder (str): Path to the folder where model are stored (used\n to load / store finetuned model)\n max_tokens (int): Max tokens for the actor\n temperature (float): Temperature for the actor\n device (torch.device): Device to be used for the actor\n lr (float): Learning rate for the actor\n iteration_per_print (int): Number of iterations to print the\n training loss\n batch_size (int): Batch size to train the actor\n epochs (int): Number of epochs to train the actor\n debug (bool): Enable prints for debugging\n train_dataset_path (str): Path to the training dataset\n validation_dataset_path (Optional[str]): Path to the validation dataset\n \"\"\"\n\n model: str\n model_folder: str\n tokenizer_folder: str\n max_tokens: int\n temperature: float\n device: torch.device\n lr: float\n iteration_per_print: int\n batch_size: int\n epochs: int\n debug: bool\n train_dataset_path: str\n validation_dataset_path: Optional[str] = None\n\n\n@dataclass\nclass ConfigTrainer:\n \"\"\"Config parameters for the trainer, used to configure the reinforcement\n learning training loop\n\n Attributes:\n update_timesteps (int): Number of timesteps to update the actor\n and critic. 
Every time update_timesteps timesteps are collected,\n the training loop for the actor and critic is executed using the\n memory buffer to learn the policy.\n temperature (float): Temperature for the actor and critic\n max_seq_len (int): Max sequence length for the actor and critic\n num_examples (int): Number of examples to generate for the actor\n and critic. For each iteration of timestep, num_examples are\n sampled from the prompt dataset, processed and stored in the\n memory buffer.\n actor_lr (float): Learning rate for the actor when training with\n reinforcement learning\n critic_lr (float): Learning rate for the critic when training with\n reinforcement learning\n num_episodes (int): Number of episodes, each episodes consist of\n a number of timesteps that are used to generate examples\n stored in the memory buffer.\n max_timesteps (int): Max timesteps for the actor and critic.\n for each timestep a set of examples are sampled and used to\n generate a completion and a reward.\n batch_size (int): Batch size to train the actor and critic.\n This batch is used to aggregate the memory from the memory buffer\n for the actual training of the actor and critic models.\n epochs (int): Number of epochs to train the actor and critic.\n actor_eps_clip (float): Epsilon clip for the actor\n critic_eps_clip (float): Epsilon clip for the critic\n beta_s (float): Beta for the actor and critic\n update_checkpoint (int): Number of timesteps to update the checkpoint\n llm_model_id (str): Model id for the llm\n llm_max_tokens (int): Max tokens for the llm\n llm_temperature (float): Temperature for the llm\n device (torch.device): Device to be used for the actor and critici\n checkpoint_folder (str): Folder to store the checkpoints while training\n debug (bool): Enable prints for debugging\n \"\"\"\n\n update_timesteps: int\n num_examples: int\n actor_lr: float\n critic_lr: float\n num_episodes: int\n max_timesteps: int\n examples_path: str\n batch_size: int\n epochs: int\n actor_eps_clip: float\n critic_eps_clip: float\n beta_s: float\n update_checkpoint: int\n llm_model_id: str\n llm_max_tokens: int\n llm_temperature: float\n device: torch.device\n checkpoint_folder: str\n debug: bool\n\n\nclass Config:\n \"\"\"Store the config parameters for the whole pipeline\n\n Args:\n trainer_dict (Optional[Dict]): Dictionary with the config parameters\n for the trainer. Default to None. If None, the config.yaml is\n used.\n actor_dict (Optional[Dict]): Dictionary with the config parameters\n for the actor. Default to None. If None, the config.yaml is\n used.\n critic_dict (Optional[Dict]): Dictionary with the config parameters\n for the critic. Default to None. If None, the config.yaml is\n used.\n reward_dict (Optional[Dict]): Dictionary with the config parameters\n for the reward. Default to None. If None, the config.yaml is\n used.\n device (Optional[torch.device]): Device to be used for the actor\n and critic. Default to None. If None, the device available is\n used.\n debug (Optional[bool]): Enable prints for debugging. 
Default to False.\n\n Attributes:\n trainer (ConfigTrainer): Config parameters for the trainer\n actor (ConfigActor): Config parameters for the actor\n critic (ConfigCritic): Config parameters for the critic\n reward (ConfigReward): Config parameters for the reward\n \"\"\"\n\n @beartype\n def __init__(\n self,\n path: str,\n device: Optional[torch.device] = None,\n debug: Optional[bool] = False,\n ) -> None:\n\n # if not specified use the device available\n if device is None:\n device = torch.device(\n \"cuda\" if torch.cuda.is_available() else \"cpu\"\n )\n print(f\"Current device used:{str(device)}\")\n\n if path is None or os.path.exists(path) is False:\n raise ValueError(\"Path to the config.yaml is not valid\")\n\n # Read the config from yaml\n with open(path, \"r\") as c:\n config = yaml.safe_load(c)\n\n trainer_dict = config[\"trainer_config\"]\n actor_dict = config[\"actor_config\"]\n critic_dict = config[\"critic_config\"]\n reward_dict = config[\"reward_config\"]\n\n # Trainer Config\n trainer_dict[\"device\"] = device\n trainer_dict[\"debug\"] = debug\n self.trainer = ConfigTrainer(**trainer_dict)\n # Actor Config\n actor_dict[\"device\"] = device\n actor_dict[\"debug\"] = debug\n self.actor = ConfigActor(**actor_dict)\n # Critic Config\n critic_dict[\"device\"] = device\n critic_dict[\"debug\"] = debug\n self.critic = ConfigReward(**critic_dict)\n # Reward Config\n reward_dict[\"device\"] = device\n reward_dict[\"debug\"] = debug\n self.reward = ConfigReward(**reward_dict)\n","repo_name":"juncongmoo/chatllama","sub_path":"chatllama/rlhf/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":9041,"program_lang":"python","lang":"en","doc_type":"code","stars":1160,"dataset":"github-code","pt":"67"}
+{"seq_id":"36960854479","text":"# Naveh,Marchoom,312275746\r\n# Python 3.6\r\n\r\n# Ido, Natan, 305727802\r\n\r\n\r\ndef flip_bit(text: bytes, index: int) -> bytes:\r\n \"\"\"\r\n Given a byte string and a bit index, flip the bit in given index.\r\n :param text: byte string\r\n :param index: index for the bit to be flipped\r\n :return: the flipped string\r\n \"\"\"\r\n temp = bytearray(text)\r\n temp[int(index / 8)] ^= (1 << (index % 8))\r\n return bytes(temp)\r\n\r\n\r\ndef find_corrupted_bit_index(block: bytes) -> int:\r\n \"\"\"\r\n Given a byte string block where all bytes should be similar, finds the unmatching byte and returns\r\n the index of the flipped bit.\r\n :param block: byte string\r\n :return: the index of the flipped bit\r\n \"\"\"\r\n byte_count = [block.count(byte) for byte in block]\r\n bit_index = [1 << i for i in range(8)].index(block[byte_count.index(1)] ^ block[byte_count.index(15)])\r\n return (byte_count.index(1) * 8) + bit_index\r\n\r\n\r\ndef find_corrupted_block_index(blocks: list) -> int:\r\n \"\"\"\r\n Given an array of byte strings, where each of them should be made of a recurring byte, find the first\r\n string which doesn't follow this rule, and return it's index.\r\n :param blocks: an array of byte strings\r\n :return: the index of the corrupted block\r\n \"\"\"\r\n for i in range(len(blocks)):\r\n if blocks[i].count(blocks[i][0]) != 16:\r\n return i\r\n\r\n\r\ndef cbc_custom_decrypt(k: bytes, n: int, cipher: bytes) -> bytes:\r\n \"\"\"\r\n Given a key, block count and a cipher-text of a byte string encrypted using CBC-AES128, decipher it.\r\n :param k: byte string representing the key used for encryption\r\n :param n: the block count\r\n :param cipher: a concatenation of the IV (initialization vector) used for encrypting the text, and the cipher text\r\n :return: A decryption of given cipher text.\r\n \"\"\"\r\n return CBC(AES128, k, cipher[:16]).decipher(cipher[16:])\r\n\r\n\r\ndef cbc_flip_fix(k: bytes, n: int, cipher: bytes) -> bytes:\r\n \"\"\"\r\n Given a key, block count and a corrupted cipher-text of a byte string encrypted using CBC-AES128, where a random\r\n bit on the cipher text got flipped (as shown on the assignment instructions, not on the last block), fix the cipher\r\n text and returns the fixed decryption of the corrupted block\r\n :param k: byte string representing the key used for encryption\r\n :param n: the block count\r\n :param cipher: a concatenation of the IV used for encrypting the text, and the corrupted cipher text\r\n :return: A fixed decryption of the corrupted block\r\n \"\"\"\r\n corrupted_plain = cbc_custom_decrypt(k, n, cipher)\r\n corrupted_plain_blocks = [corrupted_plain[i:i + 16] for i in range(0, len(corrupted_plain), 16)]\r\n corrupted_block_index = find_corrupted_block_index(corrupted_plain_blocks)\r\n corrupted_bit_index = find_corrupted_bit_index(corrupted_plain_blocks[corrupted_block_index + 1])\r\n fixed_cipher = flip_bit(cipher[16:], 128 * corrupted_block_index + corrupted_bit_index)\r\n fixed_plain = cbc_custom_decrypt(k, n, cipher[:16] + fixed_cipher)\r\n return fixed_plain[corrupted_block_index * 16:corrupted_block_index * 16 + 16]\r\n\r\n\r\nclass BlockCipher:\r\n \"\"\"\r\n A block cipher interface.\r\n \"\"\"\r\n block_size: int\r\n cipher: callable\r\n decipher: callable\r\n\r\n\r\nclass CBC:\r\n \"\"\"\r\n A class implementing the Cipher Block Chaining mode of operation algorithm.\r\n Implements the ModeOfOperation interface.\r\n \"\"\"\r\n def __init__(self, block_cipher: BlockCipher, key: bytes, iv: 
bytes):\r\n self.block_cipher = block_cipher\r\n self.key = key\r\n self.iv = iv\r\n\r\n \"\"\"\r\n Given an array of bytes, will split them to blocks,\r\n and encrypt them using CBC algorithm.\r\n \"\"\"\r\n def cipher(self, plain_text: bytes) -> bytes:\r\n cipher_text = bytes(0)\r\n blocks = CBC.__split_to_blocks(\r\n CBC.__pad_text(plain_text, self.block_cipher.block_size), self.block_cipher.block_size)\r\n c = self.iv\r\n for block in blocks:\r\n c = self.block_cipher.cipher(CBC.__xor(block, c), self.key)\r\n cipher_text += c\r\n return cipher_text\r\n\r\n \"\"\"\r\n Given an array of bytes which were encrypted using the CBC algorithm,\r\n will split them to blocks, and decrypt them.\r\n \"\"\"\r\n def decipher(self, cipher_text: bytes) -> bytes:\r\n plain_text = bytes(0)\r\n blocks = CBC.__split_to_blocks(cipher_text, self.block_cipher.block_size)\r\n c = self.iv\r\n for block in blocks:\r\n plain_text += self.__xor(self.block_cipher.decipher(block, self.key), c)\r\n c = block\r\n return plain_text\r\n\r\n \"\"\"\r\n Given an array of bytes and a block size, will check if the array's length is of a multiplication of\r\n given block size, and if not will pad the array with zeros to get it to a multiplication of block size.\r\n \"\"\"\r\n @staticmethod\r\n def __pad_text(byte_array: bytes, block_size: int) -> bytes:\r\n if len(byte_array) % block_size != 0:\r\n byte_array += bytes(block_size - len(byte_array) % block_size)\r\n return byte_array\r\n\r\n \"\"\"\r\n Given an array of bytes and a block size, will split the array into a list of blocks each of block size length.\r\n \"\"\"\r\n @staticmethod\r\n def __split_to_blocks(byte_array: bytes, block_size: int) -> list:\r\n return [byte_array[i:i + block_size] for i in range(0, len(byte_array), block_size)]\r\n\r\n \"\"\"\r\n Given two array of bytes, will assert they are both of the same length, and apply bitwise xor between them.\r\n \"\"\"\r\n @staticmethod\r\n def __xor(block1: bytes, block2: bytes) -> bytes:\r\n assert len(block1) == len(block2)\r\n return bytes([block1[i] ^ block2[i] for i in range(len(block1))])\r\n\r\n\r\nBLOCK_SIZE = 16\r\nRG_FIELD = 0x1b\r\nNUM_ROUNDS = 10\r\nR_CON = [0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36]\r\nS_BOX = [0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,\r\n 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,\r\n 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,\r\n 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,\r\n 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,\r\n 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,\r\n 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,\r\n 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,\r\n 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,\r\n 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,\r\n 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,\r\n 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,\r\n 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 
0x8a,\r\n 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,\r\n 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,\r\n 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16]\r\nR_BOX = [0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,\r\n 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,\r\n 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,\r\n 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,\r\n 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,\r\n 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,\r\n 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,\r\n 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,\r\n 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,\r\n 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,\r\n 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,\r\n 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,\r\n 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,\r\n 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,\r\n 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,\r\n 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d]\r\n\r\n\r\nclass AES128(BlockCipher):\r\n \"\"\"\r\n A class implementing the Advanced Encryption Standard algorithm for encrypting a 16B string.\r\n Implements the BlockCipher interface.\r\n \"\"\"\r\n block_size = BLOCK_SIZE\r\n\r\n \"\"\"\r\n Given a 16B plain text and a 16B key, encrypt the byte array using AES encryption algorithm.\r\n \"\"\"\r\n @classmethod\r\n def cipher(cls, plain_text: bytes, key: bytes) -> bytes:\r\n assert len(plain_text) == cls.block_size and len(key) == cls.block_size\r\n plain_text = cls.__4x4_transpose(plain_text)\r\n expanded_key = cls.__generate_expanded_key(key)\r\n state = cls.__rijndael(plain_text, expanded_key, NUM_ROUNDS)\r\n return cls.__4x4_transpose(state)\r\n\r\n \"\"\"\r\n Given a 16B cipher text which was encrypted using AES algorithm and the 16B key used for the encryption,\r\n decrypts the cipher string.\r\n \"\"\"\r\n @classmethod\r\n def decipher(cls, cipher_text: bytes, key: bytes) -> bytes:\r\n assert len(cipher_text) == cls.block_size and len(key) == cls.block_size\r\n cipher_text = cls.__4x4_transpose(cipher_text)\r\n expanded_key = cls.__generate_expanded_key(key)\r\n state = cls.__reverse_rijndael(cipher_text, expanded_key, NUM_ROUNDS)\r\n return cls.__4x4_transpose(state)\r\n\r\n \"\"\"\r\n Apply the RIJNDAEL algorithm on given byte array and expanded key for num_rounds rounds.\r\n \"\"\"\r\n @classmethod\r\n def __rijndael(cls, byte_array: bytes, expanded_key: list, num_rounds: int):\r\n state = cls.__add_round_key(byte_array, expanded_key[0])\r\n for i in range(num_rounds):\r\n state = cls.__round(state, expanded_key[i + 1], (i == num_rounds - 1))\r\n 
return state\r\n\r\n \"\"\"\r\n Run a full AES round on given hex string.\r\n \"\"\"\r\n @classmethod\r\n def __round(cls, state: bytes, round_key: bytes, last_round: bool = False) -> bytes:\r\n state = cls.__byte_substitution(state)\r\n state = cls.__row_shift(state)\r\n state = cls.__mix_columns(state) if not last_round else state\r\n state = cls.__add_round_key(state, round_key)\r\n return state\r\n\r\n \"\"\"\r\n Apply the reverse RIJNDAEL algorithm on given byte array and expanded key for num_rounds rounds.\r\n \"\"\"\r\n @classmethod\r\n def __reverse_rijndael(cls, byte_array: bytes, expanded_key: list, num_rounds: int) -> bytes:\r\n state = cls.__add_round_key(byte_array, expanded_key[num_rounds])\r\n for i in range(num_rounds - 1, -1, -1):\r\n state = cls.__reverse_round(state, expanded_key[i], i + 1, (i == 0))\r\n return state\r\n\r\n \"\"\"\r\n Run a full reverse AES round on given state.\r\n \"\"\"\r\n @classmethod\r\n def __reverse_round(cls, state: bytes, round_key: bytes, nround, last_round: bool = False) -> bytes:\r\n state = cls.__reverse_row_shift(state)\r\n state = cls.__reverse_byte_substitution(state)\r\n state = cls.__add_round_key(state, round_key)\r\n state = cls.__reverse_mix_columns(state) if not last_round else state\r\n return state\r\n\r\n \"\"\"\r\n Replaces each byte in a byte array with the corresponding byte from S_BOX.\r\n \"\"\"\r\n @classmethod\r\n def __byte_substitution(cls, byte_array: bytes) -> bytes:\r\n return bytes([S_BOX[byte] for byte in byte_array])\r\n\r\n \"\"\"\r\n Replaces each byte in a byte array with the corresponding byte from R_BOX.\r\n \"\"\"\r\n @classmethod\r\n def __reverse_byte_substitution(cls, byte_array: bytes) -> bytes:\r\n return bytes([R_BOX[byte] for byte in byte_array])\r\n\r\n \"\"\"\r\n Shift the bytes in the byte array in the following manner for each 4 bytes:\r\n Each 1st byte doesn't shift. Each 2nd byte is shifted 4 to the left. 
Each 3rd byte is shifted 8 to the left.\r\n Each 4th byte is shifted 12 to the left.\r\n \"\"\"\r\n @classmethod\r\n def __row_shift(cls, byte_array: bytes) -> bytes:\r\n temp = [byte_array[i:i + 4] for i in range(0, BLOCK_SIZE, 4)]\r\n return bytes([temp[i][(j + i) % 4] for i in range(4) for j in range(4)])\r\n\r\n \"\"\"\r\n Reverse the shifting done in row_shift method.\r\n \"\"\"\r\n @classmethod\r\n def __reverse_row_shift(cls, byte_array: bytes) -> bytes:\r\n temp = [byte_array[i:i + 4] for i in range(0, BLOCK_SIZE, 4)]\r\n return bytes([temp[i][(j - i) % 4] for i in range(4) for j in range(4)])\r\n\r\n \"\"\"\r\n Apply the mix_column method on each column from a 4x4 representation of the byte array.\r\n \"\"\"\r\n @classmethod\r\n def __mix_columns(cls, byte_array: bytes) -> bytes:\r\n return cls.__4x4_transpose(bytes([byte for i in range(4) for byte in cls.__mix_column(byte_array[i:i + 16:4])]))\r\n\r\n \"\"\"\r\n Apply the mix column algorithm:\r\n \"\"\"\r\n @classmethod\r\n def __mix_column(cls, byte_array: bytes) -> bytes:\r\n return bytes([cls.__g_mul(byte_array[0], 0x02) ^ cls.__g_mul(byte_array[3], 0x01) ^\r\n cls.__g_mul(byte_array[2], 0x01) ^ cls.__g_mul(byte_array[1], 0x03),\r\n cls.__g_mul(byte_array[1], 0x02) ^ cls.__g_mul(byte_array[0], 0x01) ^\r\n cls.__g_mul(byte_array[3], 0x01) ^ cls.__g_mul(byte_array[2], 0x03),\r\n cls.__g_mul(byte_array[2], 0x02) ^ cls.__g_mul(byte_array[1], 0x01) ^\r\n cls.__g_mul(byte_array[0], 0x01) ^ cls.__g_mul(byte_array[3], 0x03),\r\n cls.__g_mul(byte_array[3], 0x02) ^ cls.__g_mul(byte_array[2], 0x01) ^\r\n cls.__g_mul(byte_array[1], 0x01) ^ cls.__g_mul(byte_array[0], 0x03)])\r\n\r\n \"\"\"\r\n Apply the reverse mix column algorithm on each column from a 4x4 representation of the byte array\r\n \"\"\"\r\n @classmethod\r\n def __reverse_mix_columns(cls, byte_array: bytes) -> bytes:\r\n byte_array = cls.__4x4_transpose(byte_array)\r\n return cls.__4x4_transpose(\r\n bytes([byte for i in range(0, 16, 4) for byte in cls.__reverse_mix_column(byte_array[i:i + 4])]))\r\n\r\n \"\"\"\r\n Reverse the mix column algorithm.\r\n \"\"\"\r\n @classmethod\r\n def __reverse_mix_column(cls, byte_array: bytes) -> bytes:\r\n return bytes([cls.__g_mul(byte_array[0], 0x0E) ^ cls.__g_mul(byte_array[3], 0x09) ^\r\n cls.__g_mul(byte_array[2], 0x0D) ^ cls.__g_mul(byte_array[1], 0x0B),\r\n cls.__g_mul(byte_array[1], 0x0E) ^ cls.__g_mul(byte_array[0], 0x09) ^\r\n cls.__g_mul(byte_array[3], 0x0D) ^ cls.__g_mul(byte_array[2], 0x0B),\r\n cls.__g_mul(byte_array[2], 0x0E) ^ cls.__g_mul(byte_array[1], 0x09) ^\r\n cls.__g_mul(byte_array[0], 0x0D) ^ cls.__g_mul(byte_array[3], 0x0B),\r\n cls.__g_mul(byte_array[3], 0x0E) ^ cls.__g_mul(byte_array[2], 0x09) ^\r\n cls.__g_mul(byte_array[1], 0x0D) ^ cls.__g_mul(byte_array[0], 0x0B)])\r\n\r\n \"\"\"\r\n Xor the round's key with current state.\r\n \"\"\"\r\n @classmethod\r\n def __add_round_key(cls, byte_array: bytes, round_key: bytes) -> bytes:\r\n return bytes([byte_array[i] ^ round_key[i] for i in range(BLOCK_SIZE)])\r\n\r\n \"\"\"\r\n Generates an expanded key.\r\n \"\"\"\r\n @classmethod\r\n def __generate_expanded_key(cls, key: bytes) -> list:\r\n word_array = [key[i:i + 4] for i in range(0, len(key), 4)] # The expansion algorithm iterate over 4B words.\r\n for i in range(len(word_array), (NUM_ROUNDS + 1) * 4): # We need 11 keys of 4x4B words each:\r\n word = word_array[i - 1]\r\n if i % 4 == 0:\r\n word = cls.__rotate_bytes_left(word)\r\n word = cls.__byte_substitution(word)\r\n word = bytes([word[0] ^ R_CON[int(i / 4) - 1]] 
+ [byte for byte in word[1:]])\r\n word = bytes([word[j] ^ word_array[i - 4][j] for j in range(len(word))])\r\n word_array.insert(i, word)\r\n byte_array = [byte for i in range(len(word_array)) for byte in word_array[i]]\r\n # merge the words back to 16B keys:\r\n return [cls.__4x4_transpose(bytes(byte_array[i:i+16])) for i in range(0, len(byte_array), 16)]\r\n\r\n \"\"\"\r\n Transpose the byte array as a 4x4 array\r\n \"\"\"\r\n @staticmethod\r\n def __4x4_transpose(byte_array: bytes) -> bytes:\r\n return bytes([byte_array[i * 4 + j] for j in range(4) for i in range(4)])\r\n\r\n \"\"\"\r\n Multiplies 2 bytes in the Galois Field.\r\n \"\"\"\r\n @staticmethod\r\n def __g_mul(byte1: int, byte2: int) -> int:\r\n ret_byte = 0x00\r\n for i in range(8):\r\n ret_byte = ret_byte ^ byte1 if (byte2 & 0x01) != 0 else ret_byte\r\n byte1 = (byte1 << 1) ^ RG_FIELD if (byte1 & 0x80) != 0 else byte1 << 1\r\n byte2 >>= 1\r\n return ret_byte % 256\r\n\r\n \"\"\"\r\n rotates an array to the left once.\r\n \"\"\"\r\n @staticmethod\r\n def __rotate_bytes_left(array: bytes) -> bytes:\r\n return bytes([array[(i + 1) % len(array)] for i in range(len(array))])\r\n","repo_name":"naveh94/SecureProgramming-Project","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":18118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"8316120619","text":"from time import sleep\r\nfrom pynput.keyboard import Key, Controller\r\nimport discord\r\n\r\n\r\nkeyboard = Controller()\r\n\r\n\r\n\r\nclient = discord.Client()\r\n\r\n\r\n\r\nTOKEN = \"TYPE YOUR TOKEN HERE\"#--------------------------------------------------------------------------------------\r\n\r\n\r\nclient.user = input(\"What is the name of the person that is using the bot (without tag and spaces) \")\r\nclient.usersname = \"======[ \", client.user , \"'s Adventure ]======\"\r\n\r\n\r\n\r\n@client.event\r\nasync def on_ready():\r\n print(\"We have logged in as {0.user}\".format(client))\r\n\r\n\r\n\r\n\r\n@client.event\r\nasync def on_message(message):\r\n username = str(message.author)\r\n user_message = str(message.content)\r\n channel = str(message.channel.name)\r\n\r\n\r\n\r\n if message.author == client.user:\r\n return\r\n\r\n if username == \"DiscordRPG#0366\":\r\n#!adv\r\n if message.channel.name == \"discord-rpg-1\": #discord rpg deciding heal or pheal or adv\r\n print(user_message)\r\n if user_message[3] == \"d\":\r\n if user_message.split(\"!\")[1] == client.usersname:\r\n\r\n client.pethp = \"\"\r\n client.petmaxhp = \"\"\r\n client.index1 = 0\r\n client.index2 = 0\r\n\r\n client.playermaxhp = \"\"\r\n client.playerhp = \"\"\r\n\r\n #pet max hp\r\n client.index1 = user_message.index(\"HP left\")\r\n client.index2= user_message.index(\"/\")\r\n for i in range(client.index2 + 1, client.index1):\r\n if user_message[i] != \",\":\r\n client.petmaxhp = client.petmaxhp + user_message[i]\r\n #pet max hp\r\n #-----------------------------------\r\n #pethp\r\n client.index1 = user_message.index(\"/\")\r\n client.index2 = user_message.index(\"+ Pet Rock has \")\r\n for i in range(client.index2 + 15, client.index1):\r\n if user_message[i] != \",\":\r\n client.pethp = client.pethp + user_message[i]\r\n #pethp\r\n #---------------------------------------------------------------------------------------------\r\n #playermaxhp\r\n client.index1 = user_message.index(\"HP left.\")\r\n client.index2 = user_message.index(\"/\",user_message.index(\"/\")+2)\r\n for i in range(client.index2 + 1, client.index1):\r\n if user_message[i] != \",\":\r\n client.playermaxhp = client.playermaxhp + user_message[i]\r\n #playermaxhp\r\n # -----------------------------------\r\n #playerhp\r\n client.index1 = user_message.index(\"/\", user_message.index(\"/\") + 2)\r\n client.index2 = user_message.index(\"o has \") +6\r\n for i in range(client.index2, client.index1):\r\n if user_message[i] != \",\":#!adv\r\n\r\n client.playerhp = client.playerhp + user_message[i]\r\n #playerhp\r\n await message.channel.send(f\"{client.playerhp} = playerhp\")\r\n await message.channel.send(f\"{client.playermaxhp} = playermaxhp\")\r\n await message.channel.send(f\"{client.pethp} = pethp\")\r\n await message.channel.send(f\"{client.petmaxhp} = petmaxhp\")\r\n print(user_message)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n sleep(15)\r\n\r\n if int(client.pethp) <= int(client.petmaxhp)/2: #pheal\r\n sleep(7)\r\n keyboard.press(\"#\")\r\n keyboard.release(\"#\")\r\n\r\n keyboard.press(\"!\")\r\n keyboard.release(\"!\")\r\n\r\n keyboard.press(\"p\")\r\n keyboard.release(\"p\")\r\n\r\n keyboard.press(\"h\")\r\n keyboard.release(\"h\")\r\n\r\n keyboard.press(\"e\")\r\n keyboard.release(\"e\")\r\n\r\n keyboard.press(\"a\")\r\n keyboard.release(\"a\")\r\n\r\n keyboard.press(\"l\")\r\n keyboard.release(\"l\")\r\n\r\n keyboard.press(\" \")\r\n keyboard.release(\" \")\r\n\r\n keyboard.press(\"5\")\r\n 
keyboard.release(\"5\")\r\n\r\n keyboard.press(Key.enter)\r\n keyboard.release(Key.enter)\r\n\r\n\r\n if int(client.playerhp) <= int(client.playermaxhp)/2:\r\n sleep(7)\r\n\r\n keyboard.press(\"#\")\r\n keyboard.release(\"#\")\r\n\r\n keyboard.press(\"!\")\r\n keyboard.release(\"!\")\r\n\r\n keyboard.press(\"h\")\r\n keyboard.release(\"h\")\r\n\r\n keyboard.press(\"e\")\r\n keyboard.release(\"e\")\r\n\r\n keyboard.press(\"a\")\r\n keyboard.release(\"a\")\r\n #!adv\r\n\r\n keyboard.press(\"l\")\r\n keyboard.release(\"l\")\r\n\r\n keyboard.press(\" \")\r\n keyboard.release(\" \")\r\n\r\n keyboard.press(\"5\")#!forage\r\n\r\n keyboard.release(\"5\")\r\n\r\n keyboard.press(Key.enter)\r\n keyboard.release(Key.enter)\r\n\r\n\r\n\r\n keyboard.press(\"#\")\r\n keyboard.release(\"#\")\r\n\r\n keyboard.press(\"!\")\r\n keyboard.release(\"!\")\r\n\r\n keyboard.press(\"a\")\r\n keyboard.release(\"a\")\r\n\r\n keyboard.press(\"d\")\r\n keyboard.release(\"d\")\r\n\r\n keyboard.press(\"v\")\r\n keyboard.release(\"v\")\r\n\r\n keyboard.press(Key.enter)\r\n keyboard.release(Key.enter)\r\n\r\n\r\n\r\n\r\n\r\nclient.run(TOKEN)","repo_name":"Kreytorn/Discord-Dungeons-playing-adventures","sub_path":"Discord dungeons adventures code.py","file_name":"Discord dungeons adventures code.py","file_ext":"py","file_size_in_byte":6227,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"}
+{"seq_id":"72248791252","text":"from setuptools import setup\n\ndependencies = [\n \"python-jose\",\n \"pika\",\n \"lxml\"\n]\n\nsetup(name='adaptmb',\n version='0.1',\n description='ADAPT Python Message Bus API',\n url='http://www.almaobservatory.org',\n author='ALMA',\n author_email='test@alma.cl',\n license='LGPL',\n packages=['adapt', 'adapt/messagebus', 'adapt/messagebus/rabbitmq', 'adapt/messagebus/security', 'adapt/messagebus/configuration'],\n install_requires=dependencies,\n zip_safe=False)\n","repo_name":"amchavan/alma-datapro-workflow-sandbox","sub_path":"message-bus/src/main/python/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"18134706702","text":"import os\nimport sys\nimport sip\nfrom PyQt5 import QtCore\nfrom PyQt5.QtWidgets import QMessageBox\n\nfrom vorta.borg.create import BorgCreateThread\nfrom vorta.borg.version import BorgVersionThread\nfrom vorta.config import TEMP_DIR\nfrom vorta.i18n import init_translations, translate\nfrom vorta.models import BackupProfileModel, SettingsModel\nfrom vorta.qt_single_application import QtSingleApplication\nfrom vorta.scheduler import VortaScheduler\nfrom vorta.tray_menu import TrayMenu\nfrom vorta.utils import borg_compat, parse_args\nfrom vorta.views.main_window import MainWindow\nfrom vorta.notifications import VortaNotifications\n\nAPP_ID = os.path.join(TEMP_DIR, \"socket\")\n\n\nclass VortaApp(QtSingleApplication):\n \"\"\"\n All windows and QWidgets are children of this app.\n\n When running Borg-commands, the class `BorgThread` will emit events\n via the `VortaApp` class to which other windows will subscribe to.\n \"\"\"\n\n backup_started_event = QtCore.pyqtSignal()\n backup_finished_event = QtCore.pyqtSignal(dict)\n backup_cancelled_event = QtCore.pyqtSignal()\n backup_log_event = QtCore.pyqtSignal(str)\n backup_progress_event = QtCore.pyqtSignal(str)\n\n def __init__(self, args_raw, single_app=False):\n\n super().__init__(APP_ID, args_raw)\n if self.isRunning() and single_app:\n self.sendMessage(\"open main window\")\n print('An instance of Vorta is already running. Opening main window.')\n sys.exit()\n\n init_translations(self)\n\n self.setQuitOnLastWindowClosed(False)\n self.scheduler = VortaScheduler(self)\n self.setApplicationName(\"Vorta\")\n\n # Prepare system tray icon\n self.tray = TrayMenu(self)\n\n args = parse_args()\n if getattr(args, 'daemonize', False):\n pass\n elif SettingsModel.get(key='foreground').value:\n self.open_main_window_action()\n\n self.backup_started_event.connect(self.backup_started_event_response)\n self.backup_finished_event.connect(self.backup_finished_event_response)\n self.backup_cancelled_event.connect(self.backup_cancelled_event_response)\n self.message_received_event.connect(self.message_received_event_response)\n self.set_borg_details_action()\n self.installEventFilter(self)\n\n def eventFilter(self, source, event):\n if event.type() == QtCore.QEvent.ApplicationPaletteChange and type(source) == MainWindow:\n self.main_window.set_icons()\n self.main_window.repoTab.set_icons()\n self.main_window.archiveTab.set_icons()\n self.main_window.scheduleTab.set_icons()\n if event.type() == QtCore.QEvent.ApplicationPaletteChange and source == self.tray.contextMenu():\n self.tray.set_tray_icon()\n return False\n\n def create_backup_action(self, profile_id=None):\n if not profile_id:\n profile_id = self.main_window.current_profile.id\n\n profile = BackupProfileModel.get(id=profile_id)\n msg = BorgCreateThread.prepare(profile)\n if msg['ok']:\n thread = BorgCreateThread(msg['cmd'], msg, parent=self)\n thread.start()\n else:\n notifier = VortaNotifications.pick()\n notifier.deliver(self.tr('Vorta Backup'), translate('messages', msg['message']), level='error')\n self.backup_progress_event.emit(translate('messages', msg['message']))\n\n def open_main_window_action(self):\n if not self._main_window_exists():\n self.main_window = MainWindow(self)\n self.main_window.show()\n self.main_window.raise_()\n\n def _main_window_exists(self):\n return hasattr(self, 'main_window') and not sip.isdeleted(self.main_window)\n\n def toggle_main_window_visibility(self):\n if self._main_window_exists():\n self.main_window.close()\n else:\n 
self.open_main_window_action()\n\n def backup_started_event_response(self):\n self.tray.set_tray_icon(active=True)\n\n def backup_finished_event_response(self):\n self.tray.set_tray_icon()\n\n def backup_cancelled_event_response(self):\n self.tray.set_tray_icon()\n\n def message_received_event_response(self, message):\n if message == \"open main window\":\n self.open_main_window_action()\n\n def set_borg_details_action(self):\n params = BorgVersionThread.prepare()\n if not params['ok']:\n self._alert_missing_borg()\n return\n thread = BorgVersionThread(params['cmd'], params, parent=self)\n thread.result.connect(self.set_borg_details_result)\n thread.start()\n\n def set_borg_details_result(self, result):\n \"\"\"\n Receive result from BorgVersionThread. If MainWindow is open, set the version in misc tab.\n If no valid version was found, display an error.\n \"\"\"\n if 'version' in result['data']:\n borg_compat.set_version(result['data']['version'], result['data']['path'])\n if self._main_window_exists():\n self.main_window.miscTab.set_borg_details(borg_compat.version, borg_compat.path)\n self.main_window.repoTab.toggle_available_compression()\n else:\n self._alert_missing_borg()\n\n def _alert_missing_borg(self):\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Critical)\n msg.setText(self.tr(\"No Borg Binary Found\"))\n msg.setInformativeText(self.tr(\"Vorta was unable to locate a usable Borg Backup binary.\"))\n msg.setStandardButtons(QMessageBox.Ok)\n msg.exec_()\n","repo_name":"Hofer-Julian/vorta","sub_path":"src/vorta/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":5574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"}
+{"seq_id":"28494340547","text":"class Solution(object):\n from math import trunc\n def evalRPN(self, tokens):\n \"\"\"\n :type tokens: List[str]\n :rtype: int\n \"\"\"\n op = ['/', '+', '*','-']\n stack = []\n for operand in tokens:\n if operand in op :\n value = trunc(eval(stack.pop(-2) + operand + stack.pop(-1)))\n stack.append(str(value))\n \n else:\n stack.append(operand)\n return int(stack.pop())","repo_name":"fasil729/Comptetive-Programming-A2SV","sub_path":"0150-evaluate-reverse-polish-notation/0150-evaluate-reverse-polish-notation.py","file_name":"0150-evaluate-reverse-polish-notation.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"19017247140","text":"from flask import render_template, request, current_app, redirect, url_for, flash, jsonify\nfrom . import main\nfrom flask_login import login_required, current_user\nfrom ..models import TodoList, TodoItems\nfrom .. import db\nfrom datetime import datetime\n\n@main.route('/')\ndef index():\n return render_template('index.html')\n\n@main.route('/dashboard')\n@login_required\ndef dashboard():\n # get all todo lists for the current user\n todo_lists = TodoList.query.filter_by(user_id=current_user.id).all()\n\n # calculate number of tasks and completed tasks for each list\n for todo_list in todo_lists:\n total_tasks = len(todo_list.tasks)\n completed_tasks = len([task for task in todo_list.tasks if task.completed])\n todo_list.total_tasks = total_tasks\n todo_list.completed_tasks = completed_tasks\n todo_list.progress = int((completed_tasks / total_tasks) * 100) if total_tasks > 0 else 0\n\n # Check if the user has any lists\n has_lists = (len(todo_lists) > 0)\n\n return render_template('dashboard.html', todo_lists=todo_lists, has_lists=has_lists)\n\n\n@login_required\n@main.route('/tasks', methods=['POST'])\ndef new_list():\n if request.method == 'POST':\n user_id = current_user.id\n # Create a new to-do list\n list_name = request.form['listName']\n due_date_str = request.form['deadline']\n due_date = datetime.strptime(due_date_str, '%Y-%m-%d') if due_date_str else None\n\n new_list = TodoList(list_name=list_name, due_date=due_date, user_id=user_id)\n db.session.add(new_list)\n db.session.commit()\n\n # Add each non-empty item to the new to-do list\n list_id = new_list.id\n tasks = request.form.getlist('tasks')\n for task in tasks:\n if task.strip(): # Check if task is not empty after stripping whitespace\n new_item = TodoItems(task=task, list_id=list_id)\n db.session.add(new_item)\n\n db.session.commit()\n\n return redirect(url_for('main.tasks'))\n return render_template('tasks.html')\n\n\n@login_required\n@main.route('/tasks')\ndef tasks():\n # Query the current user's todo lists and associated tasks\n completed_lists = TodoList.query.filter_by(user_id=current_user.id, completed=True).all()\n uncompleted_lists = TodoList.query.filter_by(user_id=current_user.id, completed=False).all()\n\n # Create a dictionary that maps todo list names to their associated tasks\n completed_tasks_by_list = {}\n uncompleted_tasks_by_list = {}\n \n for todo_list in completed_lists:\n tasks = [task for task in todo_list.tasks]\n completed_tasks_by_list[todo_list.list_name] = {'id': todo_list.id, 'tasks': tasks, 'due_date': todo_list.due_date,}\n\n for todo_list in uncompleted_lists:\n tasks = [task for task in todo_list.tasks]\n uncompleted_tasks_by_list[todo_list.list_name] = {'id': todo_list.id, 'tasks': tasks, 'due_date': todo_list.due_date,}\n\n # Check if the user has any lists\n has_lists = (len(completed_lists) > 0 or len(uncompleted_lists) > 0)\n\n # Pass the dictionaries to the template\n return render_template('tasks.html', completed_tasks_by_list=completed_tasks_by_list, uncompleted_tasks_by_list=uncompleted_tasks_by_list, has_lists=has_lists)\n\n\n@login_required\n@main.route('/tasks/complete/', methods=['POST'])\ndef complete_task(task_id):\n # Get the task object from the database\n task = TodoItems.query.get_or_404(task_id)\n\n # Update the completed status of the task based on the POST request data\n task.completed = request.json['completed']\n\n # Calculate the number of completed tasks and total tasks for the associated todo list\n todo_list = task.todo_list\n 
completed_tasks = sum(1 for task in todo_list.tasks if task.completed)\n    total_tasks = len(todo_list.tasks)\n\n    # Update the completed status of the associated todo list\n    if total_tasks > 0 and total_tasks == completed_tasks:\n        todo_list.completed = True\n    else:\n        todo_list.completed = False\n\n    # Commit the changes to the database\n    db.session.commit()\n\n    # Return the updated task and associated todo list as JSON\n    return jsonify({'task': task.to_dict(), 'todo_list': todo_list.to_dict()})\n\n\n@main.route('/tasks/edit/<int:list_id>', methods=['GET', 'POST'])\n@login_required\ndef edit_list(list_id):\n    todo_list = TodoList.query.get_or_404(list_id)\n    if request.method == 'POST':\n        list_name = request.form['listName']\n        due_date_str = request.form['deadline']\n        due_date = datetime.strptime(due_date_str, '%Y-%m-%d') if due_date_str else None\n        tasks = request.form.getlist('tasks[]')\n        # Update task list\n        todo_list.list_name = list_name\n        todo_list.due_date = due_date\n        # Update tasks\n        existing_task_ids = [task.id for task in todo_list.tasks]\n        for i, task_name in enumerate(tasks):\n            if i < len(existing_task_ids):\n                task = TodoItems.query.get(existing_task_ids[i])\n                task.task = task_name\n            else:\n                task = TodoItems(task=task_name, list_id=todo_list.id)\n                db.session.add(task)\n\n        db.session.commit()\n\n        return redirect(url_for('main.tasks'))\n\n    return render_template('edit_list.html', todo_list=todo_list)\n\n@main.route('/tasks/delete/<int:task_id>', methods=['POST'])\n@login_required\ndef delete_task(task_id):\n    task = TodoItems.query.get_or_404(task_id)\n    list_id = task.list_id  # capture before the delete; the instance expires after commit\n    db.session.delete(task)\n    db.session.commit()\n    flash('Task deleted successfully')\n    return redirect(url_for('main.edit_list', list_id=list_id))\n\n@main.route('/tasks/delete-list/<int:list_id>', methods=['DELETE'])\n@login_required\ndef delete_list(list_id):\n    todo_list = TodoList.query.get_or_404(list_id)\n\n    # delete tasks associated with the list\n    for task in todo_list.tasks:\n        db.session.delete(task)\n\n    # delete the list itself\n    db.session.delete(todo_list)\n    db.session.commit()\n\n    return jsonify({'redirect': url_for('main.tasks')})\n\n","repo_name":"psivonen/taskmanager","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"42351991156","text":"# ailia APPS Safety Detection\r\n# (C) 2023 AXELL CORPORATION\r\n\r\nimport sys\r\nimport time\r\nfrom signal import SIGINT\r\n\r\nimport numpy as np\r\nimport cv2\r\nimport json\r\nfrom matplotlib import cm\r\nfrom PIL import Image, ImageTk\r\n\r\nimport ailia\r\n\r\n# import original modules\r\nsys.path.append('./util')\r\nfrom utils import get_base_parser, update_parser\r\n# logger\r\nfrom logging import getLogger  # noqa: E402\r\n\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\nimport tkinter.filedialog\r\nimport os\r\n\r\nlogger = getLogger(__name__)\r\n\r\n# ======================\r\n# Argument Parser Config\r\n# ======================\r\n\r\nparser = get_base_parser(\r\n    'ailia APPS safety detection', None, None)\r\n\r\nargs = update_parser(parser)\r\n\r\n\r\n# ======================\r\n# Video\r\n# ======================\r\n\r\ninput_index = 0\r\nlistsInput = None\r\nListboxInput = None\r\ninput_list = []\r\n\r\ndef get_input_list():\r\n    if args.debug:\r\n        return [\"Camera:0\"]\r\n\r\n    index = 0\r\n    inputs = []\r\n    while True:\r\n        cap = cv2.VideoCapture(index)\r\n        if cap.isOpened():\r\n            inputs.append(\"Camera:\"+str(index))\r\n        else:\r\n            break\r\n        index=index+1\r\n        cap.release()\r\n\r\n    if len(inputs) == 0:\r\n        inputs.append(\"demo.mp4\")\r\n\r\n    return inputs\r\n\r\ndef input_changed(event):\r\n    global input_index, input_list, textInputVideoDetail\r\n    selection = event.widget.curselection()\r\n    if selection:\r\n        input_index = selection[0]\r\n    else:\r\n        input_index = 0\r\n    if \"Camera:\" in input_list[input_index]:\r\n        textInputVideoDetail.set(input_list[input_index])\r\n    else:\r\n        textInputVideoDetail.set(os.path.basename(input_list[input_index]))\r\n\r\n    #print(\"input\",input_index)\r\n\r\ndef input_video_dialog():\r\n    global textInputVideoDetail, listsInput, ListboxInput, input_index, input_list\r\n    fTyp = [(\"All Files\", \"*.*\"), (\"Video files\",\"*.mp4\")]\r\n    iDir = os.path.abspath(os.path.dirname(__file__))\r\n    file_name = tk.filedialog.askopenfilename(filetypes=fTyp, initialdir=iDir)\r\n    if len(file_name) != 0:\r\n        textInputVideoDetail.set(os.path.basename(file_name))\r\n        input_list.append(file_name)\r\n        listsInput.set(input_list)\r\n        ListboxInput.select_clear(input_index)\r\n        input_index = len(input_list)-1\r\n        ListboxInput.select_set(input_index)\r\n\r\ndef apply_path_to_ui():\r\n    global textOutputVideoDetail\r\n    textOutputVideoDetail.set(os.path.basename(args.savepath))\r\n    global textOutputCsvDetail\r\n    textOutputCsvDetail.set(os.path.basename(args.csvpath))\r\n    global textOutputImageDetail\r\n    textOutputImageDetail.set(os.path.basename(args.imgpath))\r\n\r\ndef output_video_dialog():\r\n    global textOutputVideoDetail\r\n    fTyp = [(\"Output Video File\", \"*\")]\r\n    iDir = os.path.abspath(os.path.dirname(__file__))\r\n    file_name = tk.filedialog.asksaveasfilename(filetypes=fTyp, initialdir=iDir)\r\n    if len(file_name) != 0:\r\n        args.savepath = file_name\r\n        apply_path_to_ui()\r\n\r\ndef output_csv_dialog():\r\n    global textOutputCsvDetail\r\n    fTyp = [(\"Output Csv File\", \"*\")]\r\n    iDir = os.path.abspath(os.path.dirname(__file__))\r\n    file_name = tk.filedialog.asksaveasfilename(filetypes=fTyp, initialdir=iDir)\r\n    if len(file_name) != 0:\r\n        args.csvpath = file_name\r\n        apply_path_to_ui()\r\n\r\ndef output_img_dialog():\r\n    fTyp = [(\"Output Image Folder\", \"*\")]\r\n    iDir = os.path.abspath(os.path.dirname(__file__))\r\n    file_name = tk.filedialog.askdirectory(initialdir=iDir)\r\n    if len(file_name) != 0:\r\n        args.imgpath = 
file_name\r\n        apply_path_to_ui()\r\n\r\n# ======================\r\n# Environment\r\n# ======================\r\n\r\nenv_index = args.env_id\r\n\r\ndef get_env_list():\r\n    env_list = []\r\n    for env in ailia.get_environment_list():\r\n        env_list.append(env.name)\r\n    return env_list\r\n\r\ndef environment_changed(event):\r\n    global env_index\r\n    selection = event.widget.curselection()\r\n    if selection:\r\n        env_index = selection[0]\r\n    else:\r\n        env_index = 0\r\n    #print(\"env\",env_index)\r\n\r\n# ======================\r\n# Model\r\n# ======================\r\n\r\nmodel_index = 0\r\n\r\ndef get_model_list():\r\n    model_list = [\"yolox_poseresnet\"]\r\n    return model_list\r\n\r\ndef model_changed(event):\r\n    global model_index\r\n    selection = event.widget.curselection()\r\n    if selection:\r\n        model_index = selection[0]\r\n    else:\r\n        model_index = 0\r\n    #print(\"model\",model_index)\r\n\r\n# ======================\r\n# Area setting\r\n# ======================\r\n\r\ndef get_video_path():\r\n    global input_list, input_index\r\n    if \"Camera:\" in input_list[input_index]:\r\n        return input_index\r\n    else:\r\n        return input_list[input_index]\r\n\r\n# ======================\r\n# Menu functions\r\n# ======================\r\n\r\ndef get_settings():\r\n    settings = {}\r\n\r\n    global model_index\r\n    settings[\"model_type\"] = get_model_list()[model_index]\r\n\r\n    global detectionThresholdTextEntry\r\n    settings[\"detection_threshold\"] = detectionThresholdTextEntry.get()\r\n\r\n    global poseThresholdTextEntry\r\n    settings[\"pose_threshold\"] = poseThresholdTextEntry.get()\r\n\r\n    global checkBoxCategoryFallenBln\r\n    if checkBoxCategoryFallenBln.get():\r\n        settings[\"category_fallen\"] = True\r\n    else:\r\n        settings[\"category_fallen\"] = False\r\n\r\n    global checkBoxCategorySittingBln\r\n    if checkBoxCategorySittingBln.get():\r\n        settings[\"category_sitting\"] = True\r\n    else:\r\n        settings[\"category_sitting\"] = False\r\n\r\n    settings[\"savepath\"] = args.savepath\r\n    settings[\"csvpath\"] = args.csvpath\r\n    settings[\"imgpath\"] = args.imgpath\r\n\r\n    return settings\r\n\r\ndef set_settings(settings):\r\n    global model_index, ListboxModel\r\n    model_list = get_model_list()\r\n    for i in range(len(model_list)):\r\n        if settings[\"model_type\"] == model_list[i]:\r\n            model_index = i\r\n            ListboxModel.select_set(model_index)\r\n\r\n    global detectionThresholdTextEntry\r\n    detectionThresholdTextEntry.delete(0, tk.END)\r\n    detectionThresholdTextEntry.insert(0, str(settings[\"detection_threshold\"]))\r\n\r\n    global poseThresholdTextEntry\r\n    poseThresholdTextEntry.delete(0, tk.END)\r\n    poseThresholdTextEntry.insert(0, str(settings[\"pose_threshold\"]))\r\n\r\n    global checkBoxCategoryFallenBln\r\n    checkBoxCategoryFallenBln.set(settings[\"category_fallen\"])\r\n\r\n    global checkBoxCategorySittingBln\r\n    checkBoxCategorySittingBln.set(settings[\"category_sitting\"])\r\n\r\n    if \"savepath\" in settings:\r\n        args.savepath = settings[\"savepath\"]\r\n    if \"csvpath\" in settings:\r\n        args.csvpath = settings[\"csvpath\"]\r\n    if \"imgpath\" in settings:\r\n        args.imgpath = settings[\"imgpath\"]\r\n\r\n    apply_path_to_ui()\r\n\r\ndef menu_file_open_click():\r\n    fTyp = [(\"Config files\",\"*.json\")]\r\n    iDir = os.path.abspath(os.path.dirname(__file__))\r\n    file_name = tk.filedialog.askopenfilename(filetypes=fTyp, initialdir=iDir)\r\n    if len(file_name) != 0:\r\n        with open(file_name, 'r') as json_file:\r\n            settings = json.load(json_file)\r\n            set_settings(settings)\r\n\r\ndef menu_file_saveas_click():\r\n    fTyp = [(\"Config files\", 
\"*.json\")]\r\n    iDir = os.path.abspath(os.path.dirname(__file__))\r\n    file_name = tk.filedialog.asksaveasfilename(filetypes=fTyp, initialdir=iDir)\r\n    if len(file_name) != 0:\r\n        with open(file_name, 'w') as json_file:\r\n            settings = get_settings()\r\n            json.dump(settings, json_file)\r\n\r\ndef menu(root):\r\n    menubar = tk.Menu(root)\r\n\r\n    menu_file = tk.Menu(menubar, tearoff = False)\r\n    menu_file.add_command(label = \"Load settings\", command = menu_file_open_click, accelerator=\"Ctrl+O\")\r\n    menu_file.add_command(label = \"Save settings\", command = menu_file_saveas_click, accelerator=\"Ctrl+S\")\r\n    #menu_file.add_separator() # separator line\r\n    #menu_file.add_command(label = \"Quit\", command = root.destroy)\r\n\r\n    menubar.add_cascade(label=\"File\", menu=menu_file)\r\n\r\n    root.config(menu=menubar)\r\n\r\n# ======================\r\n# GUI functions\r\n# ======================\r\n\r\nroot = None\r\nresolutionTextEntry = None\r\nareaThresholdTextEntry = None\r\nlabelAcceptTextEntry = None\r\nlabelDenyTextEntry = None\r\ncheckBoxMultipleAssignBln = None\r\nListboxModel = None\r\n\r\ndef ui():\r\n    # configure the root main window\r\n    global root\r\n    root = tk.Tk()\r\n    root.title(\"ailia APPS Safety Detection\")\r\n    root.geometry(\"720x360\")\r\n\r\n    # build the menu\r\n    menu(root)\r\n\r\n    # gather environment information\r\n    global input_list\r\n    input_list = get_input_list()\r\n    model_list = get_model_list()\r\n    env_list = get_env_list()\r\n\r\n    # create and place the main frame\r\n    frame = ttk.Frame(root)\r\n    frame.pack(padx=10,pady=10)\r\n\r\n    textInputVideo = tk.StringVar(frame)\r\n    textInputVideo.set(\"Input video\")\r\n    buttonInputVideo = tk.Button(frame, textvariable=textInputVideo, command=input_video_dialog, width=14)\r\n    buttonInputVideo.grid(row=0, column=0, sticky=tk.NW)\r\n\r\n    global textInputVideoDetail\r\n    textInputVideoDetail = tk.StringVar(frame)\r\n    textInputVideoDetail.set(input_list[input_index])\r\n    labelInputVideoDetail = tk.Label(frame, textvariable=textInputVideoDetail)\r\n    labelInputVideoDetail.grid(row=0, column=1, sticky=tk.NW)\r\n\r\n    textOutputVideo = tk.StringVar(frame)\r\n    textOutputVideo.set(\"Output video\")\r\n    buttonOutputVideo = tk.Button(frame, textvariable=textOutputVideo, command=output_video_dialog, width=14)\r\n    buttonOutputVideo.grid(row=1, column=0, sticky=tk.NW)\r\n\r\n    global textOutputVideoDetail\r\n    textOutputVideoDetail = tk.StringVar(frame)\r\n    textOutputVideoDetail.set(args.savepath)\r\n    labelOutputVideoDetail= tk.Label(frame, textvariable=textOutputVideoDetail)\r\n    labelOutputVideoDetail.grid(row=1, column=1, sticky=tk.NW)\r\n\r\n    textOutputCsv = tk.StringVar(frame)\r\n    textOutputCsv.set(\"Output csv\")\r\n    buttonOutputCsv = tk.Button(frame, textvariable=textOutputCsv, command=output_csv_dialog, width=14)\r\n    buttonOutputCsv.grid(row=2, column=0, sticky=tk.NW)\r\n\r\n    global textOutputCsvDetail\r\n    textOutputCsvDetail = tk.StringVar(frame)\r\n    textOutputCsvDetail.set(args.csvpath)\r\n    labelOutputCsvDetail= tk.Label(frame, textvariable=textOutputCsvDetail)\r\n    labelOutputCsvDetail.grid(row=2, column=1, sticky=tk.NW)\r\n\r\n    textOutputImage = tk.StringVar(frame)\r\n    textOutputImage.set(\"Output image\")\r\n    buttonOutputImage = tk.Button(frame, textvariable=textOutputImage, command=output_img_dialog, width=14)\r\n    buttonOutputImage.grid(row=3, column=0, sticky=tk.NW)\r\n\r\n    global textOutputImageDetail\r\n    textOutputImageDetail = tk.StringVar(frame)\r\n    textOutputImageDetail.set(args.imgpath)\r\n    labelOutputImageDetail= tk.Label(frame, textvariable=textOutputImageDetail)\r\n    
labelOutputImageDetail.grid(row=3, column=1, sticky=tk.NW)\r\n\r\n    textTrainVideo = tk.StringVar(frame)\r\n    textTrainVideo.set(\"Run\")\r\n    buttonTrainVideo = tk.Button(frame, textvariable=textTrainVideo, command=run, width=14)\r\n    buttonTrainVideo.grid(row=4, column=0, sticky=tk.NW)\r\n\r\n    textTrainVideo = tk.StringVar(frame)\r\n    textTrainVideo.set(\"Stop\")\r\n    buttonTrainVideo = tk.Button(frame, textvariable=textTrainVideo, command=stop, width=14)\r\n    buttonTrainVideo.grid(row=5, column=0, sticky=tk.NW)\r\n\r\n    global listsInput, ListboxInput\r\n\r\n    textInputVideoHeader = tk.StringVar(frame)\r\n    textInputVideoHeader.set(\"Inputs\")\r\n    labelInputVideoHeader = tk.Label(frame, textvariable=textInputVideoHeader)\r\n    labelInputVideoHeader.grid(row=0, column=2, sticky=tk.NW)\r\n\r\n    listsInput = tk.StringVar(value=input_list)\r\n    ListboxInput = tk.Listbox(frame, listvariable=listsInput, width=26, height=4, selectmode=\"single\", exportselection=False)\r\n    ListboxInput.bind(\"<<ListboxSelect>>\", input_changed)\r\n    ListboxInput.select_set(input_index)\r\n    ListboxInput.grid(row=1, column=2, sticky=tk.NW, rowspan=3, columnspan=2)\r\n\r\n    lists = tk.StringVar(value=model_list)\r\n    listEnvironment = tk.StringVar(value=env_list)\r\n\r\n    global ListboxModel\r\n    ListboxModel = tk.Listbox(frame, listvariable=lists, width=26, height=4, selectmode=\"single\", exportselection=False)\r\n    ListboxEnvironment = tk.Listbox(frame, listvariable=listEnvironment, width=26, height=4, selectmode=\"single\", exportselection=False)\r\n\r\n    ListboxModel.bind(\"<<ListboxSelect>>\", model_changed)\r\n    ListboxEnvironment.bind(\"<<ListboxSelect>>\", environment_changed)\r\n\r\n    ListboxModel.select_set(model_index)\r\n    ListboxEnvironment.select_set(env_index)\r\n\r\n    textModel = tk.StringVar(frame)\r\n    textModel.set(\"Models\")\r\n    labelModel = tk.Label(frame, textvariable=textModel)\r\n    labelModel.grid(row=4, column=2, sticky=tk.NW, rowspan=1)\r\n    ListboxModel.grid(row=5, column=2, sticky=tk.NW, rowspan=2)\r\n\r\n    textEnvironment = tk.StringVar(frame)\r\n    textEnvironment.set(\"Environment\")\r\n    labelEnvironment = tk.Label(frame, textvariable=textEnvironment)\r\n    labelEnvironment.grid(row=8, column=2, sticky=tk.NW, rowspan=1)\r\n    ListboxEnvironment.grid(row=9, column=2, sticky=tk.NW, rowspan=4)\r\n\r\n    textOptions = tk.StringVar(frame)\r\n    textOptions.set(\"Options\")\r\n    labelOptions = tk.Label(frame, textvariable=textOptions)\r\n    labelOptions.grid(row=0, column=3, sticky=tk.NW)\r\n\r\n    textDetectionThreshold = tk.StringVar(frame)\r\n    textDetectionThreshold.set(\"Detection Threshold\")\r\n    labelDetectionThreshold = tk.Label(frame, textvariable=textDetectionThreshold)\r\n    labelDetectionThreshold.grid(row=1, column=3, sticky=tk.NW)\r\n\r\n    global detectionThresholdTextEntry\r\n    detectionThresholdTextEntry = tkinter.Entry(frame, width=20)\r\n    detectionThresholdTextEntry.insert(tkinter.END,\"0.4\")\r\n    detectionThresholdTextEntry.grid(row=2, column=3, sticky=tk.NW, rowspan=1)\r\n\r\n    textPoseThreshold = tk.StringVar(frame)\r\n    textPoseThreshold.set(\"Pose Threshold\")\r\n    labelPoseThreshold = tk.Label(frame, textvariable=textPoseThreshold)\r\n    labelPoseThreshold.grid(row=3, column=3, sticky=tk.NW)\r\n\r\n    global poseThresholdTextEntry\r\n    poseThresholdTextEntry = tkinter.Entry(frame, width=20)\r\n    poseThresholdTextEntry.insert(tkinter.END,\"0.4\")\r\n    poseThresholdTextEntry.grid(row=4, column=3, sticky=tk.NW, rowspan=1)\r\n\r\n    textLabels = tk.StringVar(frame)\r\n    textLabels.set(\"Detection Category\")\r\n    labelLabels = tk.Label(frame, textvariable=textLabels)\r\n    
labelLabels.grid(row=5, column=3, sticky=tk.NW)\r\n\r\n global checkBoxCategoryFallenBln\r\n checkBoxCategoryFallenBln = tkinter.BooleanVar()\r\n checkBoxCategoryFallenBln.set(True)\r\n checkBoxCategoryFallenAssign = tkinter.Checkbutton(frame, variable=checkBoxCategoryFallenBln, text='Fallen')\r\n checkBoxCategoryFallenAssign.grid(row=6, column=3, sticky=tk.NW, rowspan=1)\r\n\r\n global checkBoxCategorySittingBln\r\n checkBoxCategorySittingBln = tkinter.BooleanVar()\r\n checkBoxCategorySittingBln.set(True)\r\n checkBoxCategorySittingAssign = tkinter.Checkbutton(frame, variable=checkBoxCategorySittingBln, text='Sitting')\r\n checkBoxCategorySittingAssign.grid(row=7, column=3, sticky=tk.NW, rowspan=1)\r\n\r\n root.mainloop()\r\n\r\n# ======================\r\n# MAIN functions\r\n# ======================\r\n\r\ndef main():\r\n args.savepath = \"\"\r\n args.csvpath = \"\"\r\n args.imgpath = \"\"\r\n ui()\r\n\r\nimport subprocess\r\n\r\nproc = None\r\n\r\ndef run():\r\n global proc\r\n\r\n if not (proc==None):\r\n proc.kill()\r\n proc=None\r\n\r\n cmd = sys.executable\r\n\r\n args_dict = {}#vars(args)\r\n args_dict[\"video\"] = get_video_path()\r\n \r\n settings = get_settings()\r\n if settings[\"savepath\"]:\r\n args_dict[\"savepath\"] = settings[\"savepath\"]\r\n if settings[\"csvpath\"]:\r\n args_dict[\"csvpath\"] = settings[\"csvpath\"]\r\n if settings[\"imgpath\"]:\r\n args_dict[\"imgpath\"] = settings[\"imgpath\"]\r\n\r\n global model_index\r\n args_dict[\"model_type\"] = get_model_list()[model_index].split(\"-\")[0]\r\n\r\n global env_index\r\n args_dict[\"env_id\"] = env_index\r\n\r\n global detectionThresholdTextEntry\r\n if detectionThresholdTextEntry:\r\n args_dict[\"detection_threshold\"] = float(detectionThresholdTextEntry.get())\r\n\r\n global poseThresholdTextEntry\r\n if poseThresholdTextEntry:\r\n args_dict[\"pose_threshold\"] = float(poseThresholdTextEntry.get())\r\n\r\n global checkBoxCategoryFallenBln\r\n if checkBoxCategoryFallenBln.get():\r\n args_dict[\"category_fallen\"] = True\r\n\r\n global checkBoxCategorySittingBln\r\n if checkBoxCategorySittingBln.get():\r\n args_dict[\"category_sitting\"] = True\r\n\r\n options = []\r\n for key in args_dict:\r\n if key==\"ftype\":\r\n continue\r\n if args_dict[key] is not None:\r\n if args_dict[key] is True:\r\n options.append(\"--\"+key)\r\n elif args_dict[key] is False:\r\n continue\r\n else:\r\n options.append(\"--\"+key)\r\n options.append(str(args_dict[key]))\r\n\r\n cmd = [cmd, \"pose_resnet.py\"] + options\r\n print(\" \".join(cmd))\r\n\r\n dir = \"./pose_estimation/pose_resnet/\"\r\n\r\n proc = subprocess.Popen(cmd, cwd=dir)\r\n try:\r\n outs, errs = proc.communicate(timeout=1)\r\n except subprocess.TimeoutExpired:\r\n pass\r\n\r\n\r\ndef stop():\r\n global proc\r\n if not (proc==None):\r\n proc.send_signal(SIGINT)\r\n proc=None\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"axinc-ai/ailia-apps-safety-detection","sub_path":"ailia-apps-safety-detection.py","file_name":"ailia-apps-safety-detection.py","file_ext":"py","file_size_in_byte":17293,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"47528793927","text":"#!/usr/bin/env python3\n\nimport subprocess\n# from subprocess import check_output\n\n\ndef main() -> None:\n    ''' Simple process that greps the output from timetrap to return the\n    current sheet and the time entry that's running on it.\n    '''\n    timesheet_info = get_timetrap_output()\n    default_sink = timesheet_info.split('*')[-1].strip()\n\n    if default_sink:\n        print(default_sink)\n\n\ndef get_timetrap_output() -> str:\n    '''Grabs the output from timetrap ready for parsing\n    '''\n    timesheet_info = subprocess.check_output(\n        ['/usr/local/bin/t',\n         'now'],\n        stderr=subprocess.STDOUT).decode('ascii')\n    return timesheet_info\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"martsa1/dotfiles","sub_path":"dotfiles/polybar/modules/timetrap.py","file_name":"timetrap.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"31146032191","text":"from hypothesis.strategies import floats\nfrom math import isclose, sqrt\n\nimport pytest\n\nfrom typing import List, Dict\n\nfrom dataclasses import replace\nfrom hypothesis import given, assume\nfrom pytest import approx\n\nfrom paitypes.geometry.Shape import Shape\nfrom paitypes.geometry.bounding_box.BoundingBox import (contains_ratio,\n                                                        intersection)\nfrom paitypes.geometry.bounding_box import (BoundingBox,\n                                            BoundingBoxError,\n                                            EMPTY_BBOX)\nfrom paitypes.geometry.Point import Point\nfrom paitypes.tests.fixtures.fixture_bounding_box import (\n    empty_bbox, full_bbox, partial_bbox, partial_float_bbox)\nfrom paitypes.tests.fixtures.fixture_point import (\n    top_left_point, top_right_point, bot_left_point, bot_right_point,\n    neg_point, full_bbox_points)\nfrom paitypes.tests.strategies import bounding_boxes\n\n\ndef test_is_empty_partial_bbox(partial_bbox: BoundingBox) -> None:\n    assert (not partial_bbox.is_empty())\n\n\ndef test_is_empty_full_bbox(full_bbox: BoundingBox) -> None:\n    assert (not full_bbox.is_empty())\n\n\ndef test_is_empty_empty_bbox(empty_bbox: BoundingBox) -> None:\n    assert (empty_bbox.is_empty())\n\n\ndef test_inverted_bbox_is_empty() -> None:\n    bbox = BoundingBox(10.0, 10.0, 0.0, 0.0)\n    assert (bbox.is_empty())\n\n\n@pytest.mark.parametrize('raw_dict', [\n    {'x1': 1, 'x2': 2, 'y1': 3, 'y2': 4},\n    {'x1': 1.0, 'x2': 2.0, 'y1': 3.0, 'y2': 4.0},\n    {'x1': '1', 'x2': '2', 'y1': '3', 'y2': '4'},\n    {'x1': '1.0', 'x2': '2.0', 'y1': '3.0', 'y2': '4.0'},\n])\ndef test_from_dict_input_format(raw_dict: Dict) -> None:\n    bbox = BoundingBox.from_dict(raw_dict)\n    assert (not bbox.is_empty())\n    assert (bbox.x_min == 1.0)\n    assert (bbox.x_max == 2.0)\n    assert (bbox.y_min == 3.0)\n    assert (bbox.y_max == 4.0)\n\n\n@pytest.mark.parametrize('raw_dict', [\n    {'x1': 1, 'x2': 2, 'y1': 3, 'y2': 4},\n    {'x1': 1.0, 'x2': 2.0, 'y1': 3.0, 'y2': 4.0},\n    {'x1': '1', 'x2': '2', 'y1': '3', 'y2': '4'},\n    {'x1': '1.0', 'x2': '2.0', 'y1': '3.0', 'y2': '4.0'},\n])\ndef test_from_dict_to_dict(raw_dict: Dict) -> None:\n    bbox1 = BoundingBox.from_dict(raw_dict)\n    bbox2 = BoundingBox.from_dict(\n        BoundingBox.from_dict(raw_dict).to_dict())\n    assert (bbox1 == bbox2)\n\n\ndef test_to_dict_from_dict(empty_bbox: BoundingBox,\n                           partial_bbox: BoundingBox,\n                           full_bbox: BoundingBox) -> None:\n    for bbox in [empty_bbox, partial_bbox, full_bbox]:\n        assert BoundingBox.from_dict(bbox.to_dict()) == bbox\n\n\n@pytest.mark.parametrize('raw_dict', [\n    {'x1': 'a', 'x2': 'dasdf', 'y1': '3', 'y2': '4'},\n    {'x1': '1.0', 'x2': '2.0', 'y1': 'fjh', 'y2': '4.0'},\n    {'x2': '2.0', 'y1': '3.0', 'y2': '4.0'},\n    {'x1': '1.0', 'x2': '2.0', 'y1': '3.0'},\n    {'x1': '1.0', 'x2': '2.0', 'y1': '3.0', 'y2': '4.0', 'y3': '5.0'}\n])\ndef test_from_dict_invalid_input(raw_dict: Dict) -> None:\n    with pytest.raises(BoundingBoxError):\n        bbox = BoundingBox.from_dict(raw_dict)\n\n\ndef test_from_points_valid(full_bbox_points: List[Point],\n                           full_bbox: BoundingBox) -> None:\n    bbox = BoundingBox.from_points(full_bbox_points)\n    assert (not bbox.is_empty())\n    assert (bbox == full_bbox)\n\n\ndef test_from_points_valid_neg(full_bbox_points: List[Point],\n                               neg_point: Point,\n                               full_bbox: BoundingBox) -> None:\n    points = full_bbox_points + [neg_point]\n    bbox = BoundingBox.from_points(points)\n    expected_bbox = replace(full_bbox, x_min=-10.0, y_min=-10.0)\n    assert (not bbox.is_empty())\n    assert (bbox == expected_bbox)\n\n\ndef test_from_single_point_empty(full_bbox_points: List[Point]) -> None:\n    for point in full_bbox_points:\n        bbox = 
BoundingBox.from_points([point])\n assert (bbox.is_empty())\n\n\ndef test_eq_is_equal(empty_bbox: BoundingBox,\n partial_bbox: BoundingBox,\n full_bbox: BoundingBox) -> None:\n for a, b in [\n (empty_bbox, BoundingBox(0.0, 0.0, 0.0, 0.0)),\n (partial_bbox, BoundingBox(25.0, 75.0, 26.0, 70.0)),\n (full_bbox, BoundingBox(0.0, 100.0, 0.0, 100.0)),\n ]:\n assert (a == b)\n\n\ndef test_eq_not_equal(empty_bbox: BoundingBox,\n partial_bbox: BoundingBox,\n full_bbox: BoundingBox) -> None:\n for a, b in [\n (empty_bbox, partial_bbox),\n (partial_bbox, full_bbox),\n (full_bbox, empty_bbox),\n ]:\n assert (a != b)\n\n\ndef test_eq_invalid_type(empty_bbox: BoundingBox,\n partial_bbox: BoundingBox,\n full_bbox: BoundingBox) -> None:\n for a, b in [\n (empty_bbox, 3),\n (partial_bbox, 'a'),\n (full_bbox, Point(5, 8)),\n ]:\n assert (a != b)\n\n\ndef test_add_set_and_superset_returns_superset(full_bbox: BoundingBox,\n empty_bbox: BoundingBox,\n partial_bbox: BoundingBox\n ) -> None:\n for a, b in [\n (partial_bbox, partial_bbox),\n (partial_bbox, full_bbox),\n (full_bbox, full_bbox)\n ]:\n assert (a + b == b)\n\n\ndef test_offset_bboxes_returns_union(full_bbox: BoundingBox) -> None:\n other = BoundingBox(-100.0, 0.0, -100.0, 0.0)\n expected = BoundingBox(-100.0, 100.0, -100.0, 100.0)\n assert (full_bbox + other == expected)\n\n\ndef test_add_empty_bbox_raises(full_bbox: BoundingBox,\n empty_bbox: BoundingBox,\n partial_bbox: BoundingBox) -> None:\n for a, b in [\n (empty_bbox, empty_bbox),\n (empty_bbox, partial_bbox),\n (partial_bbox, empty_bbox),\n (empty_bbox, full_bbox)\n ]:\n with pytest.raises(BoundingBoxError):\n a + b\n\n\ndef test_add_other_type_raises(full_bbox: BoundingBox,\n partial_bbox: BoundingBox) -> None:\n for a, b in [\n (partial_bbox, 3),\n (partial_bbox, 'a'),\n (full_bbox, Point(0.0, 3.0))\n ]:\n with pytest.raises(NotImplementedError):\n a + b\n\n\n@given(bounding_boxes())\ndef test_intersection_same(bbox1: BoundingBox) -> None:\n assert intersection(bbox1, bbox1) == bbox1\n\n\n@given(bounding_boxes())\ndef test_intersection_empty(empty_bbox: BoundingBox,\n bbox1: BoundingBox) -> None:\n assert intersection(bbox1, empty_bbox) == empty_bbox\n\n\n@given(bounding_boxes(), bounding_boxes())\ndef test_intersection_symmetric(bbox1: BoundingBox,\n bbox2: BoundingBox) -> None:\n assert intersection(bbox1, bbox2) == intersection(bbox2, bbox1)\n\n\n@given(bounding_boxes())\ndef test_fully_contains(bbox1: BoundingBox) -> None:\n contained = BoundingBox(bbox1.x_min, bbox1.x_min + bbox1.delta_x / 2.0,\n bbox1.y_min, bbox1.y_min + bbox1.delta_y / 2.0)\n\n assume(contained.area != 0.0)\n assert contains_ratio(bbox1, contained) == 1.0\n\n\n@given(bounding_boxes())\ndef test_partially_contains(bbox1: BoundingBox) -> None:\n moved_bbox = BoundingBox(bbox1.x_min - bbox1.delta_x / 2.0,\n bbox1.x_min + bbox1.delta_x / 2.0,\n bbox1.y_min - bbox1.delta_y / 2.0,\n bbox1.y_min + bbox1.delta_y / 2.0)\n\n assume(moved_bbox.area != 0.0)\n assert isclose(contains_ratio(bbox1, moved_bbox), 0.25,\n abs_tol=1e-5)\n\n\n@given(bounding_boxes(), bounding_boxes())\ndef test_intersection_contained(bbox1: BoundingBox,\n bbox2: BoundingBox) -> None:\n if contains_ratio(bbox1, bbox2) == 1.0:\n assert intersection(bbox1, bbox2) == bbox2\n\n\n@given(bounding_boxes(), bounding_boxes())\ndef test_intersection_not_contained(bbox1: BoundingBox,\n bbox2: BoundingBox) -> None:\n if contains_ratio(bbox1, bbox2) == 0.0:\n assert (intersection(bbox1, bbox2) != bbox2 or\n bbox2.area == 0.0 or\n bbox1.area == 0.0\n 
)\n\n\n@given(bounding_boxes())\ndef test_no_move(bbox: BoundingBox) -> None:\n assert bbox.move(0, 0) == bbox\n\n\n@given(bounding_boxes(),\n floats(min_value=-10.0, max_value=10.0),\n floats(min_value=-10.0, max_value=10.0))\ndef test_scale_around_origin_changes_size(bbox: BoundingBox,\n sx: float,\n sy: float) -> None:\n scaled_bbox = bbox.scale(Shape(sx, sy))\n assert scaled_bbox.delta_x == approx(bbox.delta_x * sx, abs=1e-6)\n assert scaled_bbox.delta_y == approx(bbox.delta_y * sy, abs=1e-6)\n\n\n@given(bounding_boxes(),\n floats(min_value=-10.0, max_value=10.0),\n floats(min_value=-10.0, max_value=10.0))\ndef test_scale_around_origin_moves_center(bbox: BoundingBox,\n sx: float,\n sy: float) -> None:\n scaled_bbox = bbox.scale(Shape(sx, sy))\n assert scaled_bbox.center.x == approx(bbox.center.x * sx, abs=1e-6)\n assert scaled_bbox.center.y == approx(bbox.center.y * sy, abs=1e-6)\n\n\n@given(bounding_boxes(),\n floats(min_value=-10.0, max_value=10.0),\n floats(min_value=-10.0, max_value=10.0))\ndef test_scale_around_center_changes_size(bbox: BoundingBox,\n sx: float,\n sy: float) -> None:\n scaled_bbox = bbox.scale(Shape(sx, sy), center=bbox.center)\n assert scaled_bbox.delta_x == approx(bbox.delta_x * sx, abs=1e-6)\n assert scaled_bbox.delta_y == approx(bbox.delta_y * sy, abs=1e-6)\n\n\n@given(bounding_boxes(),\n floats(min_value=-10.0, max_value=10.0),\n floats(min_value=-10.0, max_value=10.0))\ndef test_scale_around_center_maintains_center(bbox: BoundingBox,\n sx: float,\n sy: float) -> None:\n scaled_bbox = bbox.scale(Shape(sx, sy), center=bbox.center)\n assert scaled_bbox.center.x == approx(bbox.center.x, abs=1e-6)\n assert scaled_bbox.center.y == approx(bbox.center.y, abs=1e-6)\n\n\n@given(bounding_boxes(),\n floats(min_value=-10.0, max_value=10.0),\n floats(min_value=-10.0, max_value=10.0))\ndef test_scale_around_corners_maintains_corners(bbox: BoundingBox,\n sx: float,\n sy: float) -> None:\n scaled_bbox = bbox.scale(Shape(sx, sy),\n center=Point(bbox.x_min, bbox.y_min))\n assert scaled_bbox.x_min == approx(bbox.x_min, abs=1e-6)\n assert scaled_bbox.y_min == approx(bbox.y_min, abs=1e-6)\n\n scaled_bbox = bbox.scale(Shape(sx, sy),\n center=Point(bbox.x_min, bbox.y_max))\n assert scaled_bbox.x_min == approx(bbox.x_min, abs=1e-6)\n assert scaled_bbox.y_max == approx(bbox.y_max, abs=1e-6)\n\n scaled_bbox = bbox.scale(Shape(sx, sy),\n center=Point(bbox.x_max, bbox.y_min))\n assert scaled_bbox.x_max == approx(bbox.x_max, abs=1e-6)\n assert scaled_bbox.y_min == approx(bbox.y_min, abs=1e-6)\n\n scaled_bbox = bbox.scale(Shape(sx, sy),\n center=Point(bbox.x_max, bbox.y_max))\n assert scaled_bbox.x_max == approx(bbox.x_max, abs=1e-6)\n assert scaled_bbox.y_max == approx(bbox.y_max, abs=1e-6)\n","repo_name":"PassengerAI/shared","sub_path":"paitypes/tests/unit/geometry/bounding_box/test_bounding_box.py","file_name":"test_bounding_box.py","file_ext":"py","file_size_in_byte":11536,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"30049843810","text":"import lexical\nimport syntax\nfrom tkinter import *\nfrom tkinter import constants\nfrom pyglet import font\n\nfont.add_file('assets/OpenSans-ExtraBold.ttf')\nfont.add_file('assets/OpenSans-Regular.ttf')\n\n\ndef run_lex(): # Run Lexical Analyzer\n inputValue = editorPane.get(\"1.0\", \"end-1c\")\n lex = lexical.lexer(inputValue)\n print_lex(lex.type, lex.value)\n print_error(lex.error)\n lexPane.config(state=\"disabled\")\n errorPane.config(state=\"disabled\")\n\n\ndef run_syntax():\n inputValue = editorPane.get(\"1.0\", \"end-1c\")\n lexPane.config(state=\"normal\")\n lexPane.delete('1.0', constants.END)\n errorPane.config(state=\"normal\")\n errorPane.delete('1.0', constants.END)\n syntax_error = syntax.parser(inputValue)\n print_error(syntax_error)\n lexPane.config(state=\"disabled\")\n errorPane.config(state=\"disabled\")\n\n\ndef print_lex(type, value): # Print Text to Lexical Pane\n lexPane.config(state=\"normal\")\n lexPane.delete('1.0', constants.END)\n lexPane.insert(constants.END, \"LEXEME\\t\\t\\tTOKEN\\n\\n\")\n for i in range(len(type)):\n if type[i] == 'lex-error' or type[i] == 'newline' or type[i] == 'whitespace':\n continue\n else:\n lexPane.insert(\n constants.END, f'{str(value[i]) if len(str(value[i]))<=15 else str(value[i])[:10] + \"...\"}\\t\\t\\t{str(type[i])}\\n')\n\n\ndef print_error(error):\n errorPane.config(state=\"normal\")\n errorPane.delete('1.0', constants.END)\n for err in range(len(error)):\n if err+1 == len(error):\n if error[err] != '':\n errorPane.insert(constants.END, f'{error[err]}\\n')\n else:\n if error[err] != '':\n if error[err] != error[err+1]:\n errorPane.insert(constants.END, f'{error[err]}\\n')\n else:\n continue\n\n\ndef refresh():\n editorPane.config(state=\"normal\")\n lexPane.config(state=\"normal\")\n errorPane.config(state=\"normal\")\n editorPane.delete('1.0', constants.END)\n lexPane.delete('1.0', constants.END)\n errorPane.delete('1.0', constants.END)\n lexPane.config(state=\"disabled\")\n errorPane.config(state=\"disabled\")\n\n\nwindow = Tk()\n\nwindow.geometry(\"939x617\")\nwindow.resizable(False, False)\nwindow.title(\"Bind Compiler\")\nwindow.iconbitmap(\"assets/logo.ico\")\nwindow.configure(bg=\"#E3E3E3\")\ncanvas = Canvas(\n window,\n bg=\"#E3E3E3\",\n height=617,\n width=939,\n bd=0,\n highlightthickness=0,\n relief=\"ridge\")\ncanvas.place(x=0, y=0)\n\n# Title Bar\ntitlebar_img = PhotoImage(file=f\"assets/titlebar.png\")\ntitlebar = canvas.create_image(\n 469.5, 23,\n image=titlebar_img)\ntitlebarIcon_img = PhotoImage(file=f\"assets/logo.png\")\ntitleIcon = canvas.create_image(\n 20, 22,\n image=titlebarIcon_img,\n)\ntitle = canvas.create_text(\n 70, 22,\n text=\"Bind\",\n font=('Open Sans ExtraBold', 20),\n fill=\"#211B36\",\n)\n\n# Buttons\nrunIcon_img = PhotoImage(file=f\"assets/run.png\")\nlexicalBtn = Button(\n image=runIcon_img,\n compound=LEFT,\n bg=\"#CDDCE1\",\n borderwidth=0,\n highlightthickness=0,\n activebackground=\"#211B36\",\n fg=\"#079AD2\",\n text=\" Lexical Analyzer\",\n font=('Open Sans', 10),\n activeforeground=\"#FFFFFF\",\n justify=\"center\",\n command=run_lex,\n)\nlexicalBtn.place(\n x=13, y=57,\n width=148,\n height=30,\n)\n# semButton.place(\n# x=200, y=52,\n# width=200,\n# height=30)\nsyntaxBtn = Button(\n image=runIcon_img,\n compound=LEFT,\n bg=\"#CDDCE1\",\n borderwidth=0,\n highlightthickness=0,\n activebackground=\"#211B36\",\n fg=\"#079AD2\",\n text=\" Syntax Analyzer\",\n font=('Open Sans', 10),\n activeforeground=\"#FFFFFF\",\n justify=\"center\",\n 
command=run_syntax,\n)\nsyntaxBtn.place(\n x=170, y=57,\n width=148,\n height=30,\n)\n\n\n# Editor Pane\neditorPane = Text(\n bd=0,\n bg=\"#F0F0F0\",\n highlightthickness=0,\n fg=\"#211B36\",\n padx=10,\n pady=10,\n font=('Open Sans', 10),\n)\n\neditorPane.place(\n x=13, y=100,\n width=577,\n height=324\n)\n\n\n# Lexeme Table Pane\nlexPane = Text(\n bd=0,\n bg=\"#E9E7E7\",\n highlightthickness=0,\n fg=\"#211B36\",\n padx=10,\n pady=10,\n font=('Open Sans', 10),\n state=\"disabled\",\n)\n\nlexPane.place(\n x=616, y=100,\n width=297,\n height=487,\n)\n\n# Error Pane\nerrorPane = Text(\n bd=0,\n bg=\"#E9E7E7\",\n highlightthickness=0,\n fg=\"#211B36\",\n padx=10,\n pady=10,\n font=('Open Sans', 10),\n state=\"disabled\",)\n\nerrorPane.place(\n x=13, y=445,\n width=577,\n height=152,\n)\n\nscrollBar = Scrollbar(editorPane, orient='vertical', command=editorPane.yview)\nscrollBar.pack(side=RIGHT, fill=Y)\n\n# communicate back to the scrollbar\neditorPane['yscrollcommand'] = scrollBar.set\n\nwindow.mainloop()\n","repo_name":"scaredmeow/py-compiler-bind","sub_path":"app/bind.py","file_name":"bind.py","file_ext":"py","file_size_in_byte":4764,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"}
+{"seq_id":"1901204918","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[4]:\n\n\nfrom cv2 import VideoCapture,CAP_PROP_FPS,imwrite\nimport os\n\n\n# In[5]:\n\n\ndef split_img(path):\n file_name = [i for i in os.listdir(path) if '.MP4' in i.upper() or '.AVI' in i.upper()]\n img_count = 0\n\n for i in file_name:\n cap = VideoCapture(path + '/' + i)\n fps = int(cap.get(CAP_PROP_FPS))\n count = 0\n while True:\n status, Frame = cap.read()\n if not status:\n break\n if not os.path.exists(path + '/img'):\n os.makedirs(path + '/img')\n\n if (count % fps == 0) or (count % fps == int(fps/2)):\n imwrite(path + '/img' + '/' + i.split('.')[0] + '_' + '0' * (8-len(str(img_count))) + str(img_count) + '.jpg', Frame)\n \n #imwrite(path + '/img' + '/' + '0' * (8-len(str(img_count))) + str(img_count) + '.jpg', Frame)\n img_count += 1\n print(\"Create Image : \",path + '/img' + '/' + i.split('.')[0] + '_' + '0' * (8-len(str(img_count))) + str(img_count) + '.jpg')\n count += 1\n\n\n# In[6]:\n\n\nprint(\"--Start split video to image--\")\nvideo_path = \"video\"\nif not os.path.exists(video_path):\n print(\"--Create Video folder--\")\n os.makedirs(video_path)\nelse:\n split_img(video_path)\n print(\"--Done !!--\")\n\n\n# In[ ]:\n\n\n\n","repo_name":"JackyWFLin/fab_label_tool","sub_path":"video_to_img.py","file_name":"video_to_img.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"19201087261","text":"from discord.embeds import Embed\nfrom discord.ext.commands import Cog, Bot, errors, BotMissingPermissions\nfrom traceback import format_exception\n\n\nclass Errors(Cog):\n def __init__(self, bot: Bot):\n super().__init__()\n if bot.config.get('log_channel') is None:\n raise KeyError('log channel is required')\n \n self.bot = bot\n self.channel = bot.get_channel(self.bot.config['log_channel'])\n \n def embed(self, title: str, message: str):\n return Embed(\n title=title,\n description=message,\n colour=0xe05151\n )\n \n async def log_error(self, err, ctx):\n path: str\n if ctx is None:\n path = '/'\n else:\n path = (str(ctx.guild.id) if ctx.guild is not None else 'DM') +'/'+ str(ctx.channel.id)\n \n show_trace = self.bot.config['log_trace']\n if show_trace:\n stack = format_exception(type(err), err, err.__traceback__)\n stack.pop() if len(stack) > 1 else None\n \n print(\n 'ERROR! %s\\n%s\\n\\nPATH: %s\\n%s' % (\n err.__class__.__name__, str(err), path,\n 'TRACE:\\n%s\\n' % '\\n'.join(stack) if show_trace else ''\n )\n )\n if self.channel is None:\n return\n \n return await self.channel.send(\n embed=self.embed(\n f'Error: {err.__class__.__name__}',\n '%s\\n\\n**Path:** %s\\n```\\n%s\\n```' % (\n str(err), path, '\\n'.join(stack) if show_trace else 'Stack Trace Disabled'\n )\n )\n )\n \n @Cog.listener()\n async def on_command_error(self, ctx, err):\n if isinstance(err, (errors.CommandNotFound, errors.DisabledCommand)):\n return\n \n if isinstance(err, errors.MissingRequiredArgument):\n arg = str(err).split('is')[0][:-1]\n count = len(ctx.command.name)\n if len(ctx.command.params) > 3:\n for p in ctx.command.params:\n count += len(p)\n \n count -= len(arg)\n \n spaces = [' ' for _ in range(count + 2)]\n bar = ''.join(spaces) + ('^' * len(arg))\n \n return await ctx.reply(\n 'Missing Arguments! `▱ᗢ\\n```\\n%s %s\\n%s\\n```' %\n (ctx.command, ctx.command.signature, bar)\n )\n \n if isinstance(err, BotMissingPermissions):\n if 'send_messages' in err.missing_permissions:\n try:\n return await ctx.author.send(\n embed=self.embed(\n 'Missing Permissions',\n 'I don\\'t have the __Send Messages__ permission for that channel!'\n ' If you\\'re sure that I do, contact support about this using'\n ' `@Vixen support`.'\n )\n )\n except:\n pass\n \n if (\n 'embed_links' in err.missing_permissions or\n 'attach_files' in err.missing_permissions\n ):\n return await ctx.reply(\n '**Error!**\\nI don\\'t have __Embed Links__ or __Attach Files__ permissions'\n ' for this channel!'\n )\n \n try:\n await ctx.reply(\n 'An unknown error occurred! My owner has been notified, please'\n ' contact support if this keeps happening using `@Vixen support`.'\n )\n except:\n pass\n \n return await self.log_error(err, ctx)\n\n\ndef setup(bot):\n bot.add_cog(Errors(bot))\n","repo_name":"devnote-dev/vixen","sub_path":"vixen/ext/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"2743024212","text":"from aws_cdk import (\n    aws_lambda,\n    aws_dynamodb,\n    aws_events,\n    aws_events_targets,\n    Duration, Stack,\n    aws_apigateway as _apigw\n)\nfrom constructs import Construct\nimport json\nimport yaml\n\nclass Sls(Stack):\n\n    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:\n        super().__init__(scope, construct_id, **kwargs)\n\n        with open('./serverless/manifest.yml') as file:\n            #prime_service = yaml.safe_load(file)\n            try:\n                print(yaml.safe_load(file))\n            except yaml.YAMLError as exc:\n                print(exc)\n\n        #print(prime_service['functions']['hello']['handler'])","repo_name":"paniji/pm-sa1","sub_path":"pm_sa1/serverless.py","file_name":"serverless.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"1383831838","text":"class Solution(object):\n    def canConstruct(self, ransomNote, magazine):\n        \"\"\"\n        :type ransomNote: str\n        :type magazine: str\n        :rtype: bool\n        \"\"\"\n        for s in set(ransomNote):\n            if ransomNote.count(s) > magazine.count(s):\n                return False\n        return True\n\ns = Solution()\nprint(s.canConstruct(\"aabb\", \"asdfadasbbs\"))","repo_name":"Jeta1me1PLUS/learnleetcode","sub_path":"383/383RansomNote.py","file_name":"383RansomNote.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"74162881173","text":"from fabric.api import task\nfrom fabric.context_managers import settings\n\nfrom infrastructure.automation.functions import *\n\n\nclass CiAgent(object):\n DEFAULT_RESOURCES = ['docker']\n\n def __init__(self, assigned_services):\n self.__assigned_services = assigned_services\n\n def get_resources(self):\n return map(build_cache_resource_for_service, self.__assigned_services) + CiAgent.DEFAULT_RESOURCES\n\n\nclass EvenSpreadBuildAgentServiceAssignmentStrategy(object):\n def get_services_assigned_to_ci_agent(self, agent_index, services_managed_by_pipeline, ci_agent_count):\n assigned_services_count = self.__get_number_of_services_assigned_to_agent_with_index(\n agent_index,\n services_managed_by_pipeline,\n ci_agent_count\n )\n\n return map(\n lambda n: self.__get_nth_service_for_agent(agent_index, n, services_managed_by_pipeline, ci_agent_count),\n range(0, assigned_services_count))\n\n @staticmethod\n def __get_number_of_services_assigned_to_agent_with_index(agent_index, services_managed_by_pipeline,\n ci_agent_count):\n service_count = len(services_managed_by_pipeline)\n minimum_services_per_agent = service_count / ci_agent_count\n remainder_services = service_count % ci_agent_count\n return minimum_services_per_agent + 1 if agent_index < remainder_services else minimum_services_per_agent\n\n @staticmethod\n def __get_nth_service_for_agent(agent_index, n, services_managed_by_pipeline, ci_agent_count):\n return services_managed_by_pipeline[agent_index + ci_agent_count * n]\n\n\nclass Pipeline(object):\n def __init__(self, ci_agent_count, services_managed_by_pipeline, build_agent_service_assignment_strategy):\n self.__ci_agent_count = ci_agent_count\n self.__services_managed_by_pipeline = services_managed_by_pipeline\n self.__build_agent_service_assignment_strategy = build_agent_service_assignment_strategy\n self.__ci_agents = map(lambda agent_index: self.__create_ci_agent_at_index(agent_index),\n range(0, self.__ci_agent_count))\n\n def get_ci_agents(self):\n return self.__ci_agents\n\n def __create_ci_agent_at_index(self, agent_index):\n assigned_services = self.__build_agent_service_assignment_strategy.get_services_assigned_to_ci_agent(\n agent_index,\n self.__services_managed_by_pipeline,\n self.__ci_agent_count\n )\n\n return CiAgent(assigned_services)\n\n\ndef build_cache_resource_for_service(service):\n return service + '_build_cache'\n\n\n@task\ndef deploy(automation_agent_auto_register_key, automation_agent_root_cert_pem_content,\n automation_server_encrypted_keystore_content, availability_zones, aws_account_id, aws_region, ci_agent_count,\n contact_details, elsevier_cidrs, environment, global_remote_state_s3_bucket, global_remote_state_s3_key,\n global_remote_state_s3_region, public_ssh_key, services_managed_by_pipeline, ssrn_vpc_ip_addresses, vpc_cidr,\n build_monitor_login_credentials, dry_run='yes'):\n pipeline = Pipeline(\n int(ci_agent_count),\n services_managed_by_pipeline.split(','),\n EvenSpreadBuildAgentServiceAssignmentStrategy()\n )\n\n ci_agent_resources = [','.join(resources) for resources in\n [ci_agent.get_resources() for ci_agent in pipeline.get_ci_agents()]]\n configuration = {\n 'aws': {\n 'iam_role': {\n 'account_id': aws_account_id,\n 'short_name': 'pipeline_deployer',\n }\n },\n 'environment': environment,\n 'product': 'ssrn_pipeline',\n 'terraform': {\n 's3_remote_state': {\n 'region': aws_region,\n 'bucket': 'elsevier-ssrn_pipeline-{environment}'.format(environment=environment),\n 'key': 'pipeline.tfstate'\n },\n 
'variables': {\n 'ansible_role_versions': terraform_command_line_map_variable_from({\n 'aptitude_package_recipient': '${APTITUDE_PACKAGE_RECIPIENT_ANSIBLE_ROLE_VERSION}',\n 'automater': '${AUTOMATER_ANSIBLE_ROLE_VERSION}',\n 'automation_agent': '${AUTOMATION_AGENT_ANSIBLE_ROLE_VERSION}',\n 'automation_server': '${AUTOMATION_SERVER_ANSIBLE_ROLE_VERSION}',\n 'aws_api_client': '${AWS_API_CLIENT_ANSIBLE_ROLE_VERSION}',\n 'aws_ssh_server': '${AWS_SSH_SERVER_ANSIBLE_ROLE_VERSION}',\n 'deployer': '${DEPLOYER_ANSIBLE_ROLE_VERSION}',\n 'container_factory': '${CONTAINER_FACTORY_ANSIBLE_ROLE_VERSION}',\n 'frontend_project_builder': '${FRONTEND_PROJECT_BUILDER_ANSIBLE_ROLE_VERSION}',\n 'ssrn_system_simulator': '${SSRN_SYSTEM_SIMULATOR_ANSIBLE_ROLE_VERSION}',\n 'java_project_builder': '${JAVA_PROJECT_BUILDER_ANSIBLE_ROLE_VERSION}',\n 'browser_test_runner': '${BROWSER_TEST_RUNNER_ANSIBLE_ROLE_VERSION}',\n 'long_running_host': '${LONG_RUNNING_HOST_ANSIBLE_ROLE_VERSION}',\n 'clock_synchronization_host': '${CLOCK_SYNCHRONIZATION_HOST_ANSIBLE_ROLE_VERSION}'\n }),\n 'automation_agent_auto_register_key': automation_agent_auto_register_key,\n 'automation_agent_root_cert_pem_content': automation_agent_root_cert_pem_content,\n 'automation_server_encrypted_keystore_content': automation_server_encrypted_keystore_content,\n 'availability_zones': terraform_command_line_list_variable_from(availability_zones.split(',')),\n 'build_monitor_login_credentials': build_monitor_login_credentials,\n 'ci_agent_count': ci_agent_count,\n 'ci_agent_resources': terraform_command_line_list_variable_from(ci_agent_resources),\n 'aws_region': aws_region,\n 'contact_details': contact_details,\n 'elsevier_cidrs': terraform_command_line_list_variable_from(elsevier_cidrs.split(',')),\n 'environment': environment,\n 'global_remote_state_s3_bucket': global_remote_state_s3_bucket,\n 'global_remote_state_s3_key': global_remote_state_s3_key,\n 'global_remote_state_s3_region': global_remote_state_s3_region,\n 'public_ssh_key': public_ssh_key,\n 'ssrn_vpc_ip_addresses': terraform_command_line_list_variable_from(ssrn_vpc_ip_addresses.split(',')),\n 'vpc_cidr': vpc_cidr\n }\n }\n }\n\n deploy_to_aws_with_iam_role(configuration, (dry_run.lower() != 'no' and dry_run.lower() != 'n'))\n","repo_name":"ngelsevier/preprint","sub_path":"pipeline/pipeline/fabfile-deployment.py","file_name":"fabfile-deployment.py","file_ext":"py","file_size_in_byte":6820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"31279757400","text":"\"\"\"init\n\nRevision ID: 777972d6cc83\nRevises:\nCreate Date: 2021-08-03 20:12:31.939979\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"777972d6cc83\"\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table(\n \"users\",\n sa.Column(\"id\", sa.BigInteger(), nullable=False),\n sa.Column(\"username\", sa.String(), nullable=True),\n sa.Column(\"is_active\", sa.Boolean(), server_default=\"true\", nullable=True),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n op.create_index(op.f(\"ix_users_id\"), \"users\", [\"id\"], unique=False)\n op.create_index(op.f(\"ix_users_username\"), \"users\", [\"username\"], unique=True)\n op.create_table(\n \"follows\",\n sa.Column(\"followee_id\", sa.BigInteger(), nullable=False),\n sa.Column(\"follower_id\", sa.BigInteger(), nullable=False),\n sa.ForeignKeyConstraint(\n [\"followee_id\"],\n [\"users.id\"],\n ),\n sa.ForeignKeyConstraint(\n [\"follower_id\"],\n [\"users.id\"],\n ),\n sa.PrimaryKeyConstraint(\"followee_id\", \"follower_id\"),\n )\n op.create_table(\n \"tweets\",\n sa.Column(\"id\", sa.BigInteger(), nullable=False),\n sa.Column(\"sender_id\", sa.BigInteger(), nullable=True),\n sa.Column(\"text\", sa.String(), nullable=True),\n sa.Column(\n \"timestamp\", sa.TIMESTAMP(), server_default=sa.text(\"now()\"), nullable=False\n ),\n sa.ForeignKeyConstraint(\n [\"sender_id\"],\n [\"users.id\"],\n ),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n op.create_index(op.f(\"ix_tweets_id\"), \"tweets\", [\"id\"], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f(\"ix_tweets_id\"), table_name=\"tweets\")\n op.drop_table(\"tweets\")\n op.drop_table(\"follows\")\n op.drop_index(op.f(\"ix_users_username\"), table_name=\"users\")\n op.drop_index(op.f(\"ix_users_id\"), table_name=\"users\")\n op.drop_table(\"users\")\n # ### end Alembic commands ###\n","repo_name":"repodevs/twisobox","sub_path":"app/alembic/versions/777972d6cc83_init.py","file_name":"777972d6cc83_init.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"36138085841","text":"import numpy as np\nimport collections\n\ncompare = lambda x, y: collections.Counter(x) == collections.Counter(y)\ndim = 0\n\n\ndef getCentroid(cluster):\n    total = [0.0]*dim\n    if len(cluster)==0:\n        return total\n    for p in cluster:\n        for i in range(len(p)):\n            total[i] += p[i]\n    for i in range(len(total)):\n        total[i] /= len(cluster)\n    return total\n\ndef similarity(x, y):\n    # dot-product (cosine-style) similarity: larger means more similar\n    return np.dot(np.array(x),np.array(y))\n\ndef reassignClusters(dataSet, centroids, clusters, index_clusters):\n    # rebinding the loop variable (cl = []) would not clear the lists,\n    # so clear them through their indices instead\n    for i in range(len(clusters)):\n        clusters[i] = []\n    for i in range(len(index_clusters)):\n        index_clusters[i] = []\n    for index in range(len(dataSet)):\n        sims = [0]*len(centroids)\n        for i in range(len(centroids)):\n            sims[i] = similarity(dataSet[index], centroids[i])\n        # assign each point to the most similar centroid (largest dot product)\n        c_indx = sims.index(max(sims))\n        clusters[c_indx].append(dataSet[index])\n        index_clusters[c_indx].append(index)\n\n\nK = 4\nif __name__ == '__main__':\n    dataSet = np.load('data/doc_vec.npy').tolist()\n    # print(type(dataSet))\n    # print(type(dataSet[0]))\n    dim = len(dataSet[0])\n\n    clusters = []\n    index_clusters = []\n    k = 0\n    while k < K:\n        cluster = []\n        clusters.append(cluster)\n        index_cluster = []\n        index_clusters.append(index_cluster)\n        k += 1\n\n    # Initially assign points to clusters in contiguous blocks\n    part = len(dataSet)//K\n    i = 0\n    for j in range(len(clusters)):\n        for index in range(i,min(len(dataSet), i+part)):\n            clusters[j].append(dataSet[index])\n            index_clusters[j].append(index)\n        i += part\n\n    # calculate centroid for clusters\n    centroids = []\n    for j in range(K):\n        centroids.append(getCentroid(clusters[j]))\n\n    reassignClusters(dataSet, centroids, clusters, index_clusters)\n\n    # continue till converge\n    iteration = 0\n    while True:\n        iteration += 1\n        # calculate centroid for clusters\n        centroidsNew = []\n        for j in range(K):\n            centroidsNew.append(getCentroid(clusters[j]))\n\n        # converged only if every centroid is unchanged\n        isConverge = True\n        for j in range(K):\n            if not compare(centroidsNew[j], centroids[j]):\n                isConverge = False\n                break\n        if isConverge:\n            break\n\n        reassignClusters(dataSet, centroidsNew, clusters, index_clusters)\n        for j in range(K):\n            centroids[j] = centroidsNew[j]\n        # print(\"centroids: \" + str(centroids))\n\n        for j in range(K):\n            print(\"cluster \" + str(j) + \" size: \" + str(len(clusters[j])))\n\n\n    print(\"Iteration :\" + str(iteration))\n\n    for c in clusters:\n        print(len(c))\n\n    for c in index_clusters:\n        print(c)\n\n\n","repo_name":"AChin1311/TrendingEvent","sub_path":"code/kmeans_cos.py","file_name":"kmeans_cos.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
+{"seq_id":"24213175961","text":"\n# coding: utf-8\n\n# In[1]:\n\n\n'''\nstring inbuilt methods - functions that belong to class String\nsyntax: \"string\".function()\n'''\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 18 12:22:33 2018\n\n@author: gopinath\n\"\"\"\n\n\n#format method\n\"Hi I am {}\".format(\"name\")\n\n\n# In[2]:\n\n\n#count number of a's in string\n\"Happy Birthday\".count(\"a\")\n\n\n# In[3]:\n\n\n#count number of a's in string\nx=\"Happy Birthday Ganesh\"\nx.count(\"a\")\n\n\n# In[4]:\n\n\nx = \"HAPPY BIRTHDAY\"\n\n#lower method\nx.lower()\n\n\n# In[5]:\n\n\n#x is immutable, that is, it cannot be changed in place but only replaced\n#x is still in the original upper case\nx\n\n\n# In[6]:\n\n\nx=x.lower()\n\n\n# In[7]:\n\n\n#x is immutable, that is, it cannot be changed in place but only replaced\nprint(x)\n\n\n# In[8]:\n\n\n#capitalize\nx.capitalize()\n\n\n# In[9]:\n\n\n#Title\nx.title()\n\n\n# In[10]:\n\n\n#islower -- returns True or False\nx.islower()\n\n\n# In[11]:\n\n\n#\nx.isupper()\n\n\n# In[12]:\n\n\n#isalpha is False because the space between HAPPY & BIRTHDAY is not a letter\nx.isalpha()\n\n\n\n\n# In[13]:\n\n\n#space is not an alphabetic character\n\" \".isalpha()\n\n\n# In[14]:\n\n\n\"abv\".isalpha()\n\n\n# In[15]:\n\n\n\"1234\".isdigit()\n\n\n# In[16]:\n\n\nx=\"Bonus\"\n#position of o in string\nx.index(\"o\")\n\n\n# In[17]:\n\n\nx.find(\"ou\")\n\n\n# In[18]:\n\n\n#capital O is missing, so find() returns -1\nx.find(\"O\")\n\n\n# In[19]:\n\n\nx.find(\"o\")\n\n\n# In[ ]:\n\n\n#2 spaces + john = 6 characters\nname =input(\"what is your name?\")\nprint(len(name))\n\n\n# In[ ]:\n\n\n#strip function would remove the surrounding spaces\nname =input(\"what is your name?\").strip()\nprint(len(name))\n\n","repo_name":"gopi-123/python_projects","sub_path":"strings_methods.py","file_name":"strings_methods.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
+{"seq_id":"31749711303","text":"class Solution:\n def roman_to_int(self, s: str) -> int:\n dct = {'I': 1,\n 'V': 5,\n 'X': 10,\n 'L': 50,\n 'C': 100,\n 'D': 500,\n 'M': 1000}\n result_int = 0\n s = s.replace('IV', 'IIII').replace('IX', 'VIIII')\n s = s.replace('XL', 'XXXX').replace('XC', 'LXXXX')\n s = s.replace('CD', 'CCCC').replace('CM', 'DCCCC')\n\n for i in range(len(s)):\n result_int += dct[s[i]]\n\n return result_int\n\n\na = Solution()\nprint(a.roman_to_int('XXXI'))\n","repo_name":"Flopp30/leet_code","sub_path":"easy/RomanToInt.py","file_name":"RomanToInt.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"37496554601","text":"from heroes import *\nfrom places import *\n\n\ndef save_the_place(hero: SuperHero, place: Place):\n hero.find(place)\n hero.attack()\n if hero.can_use_ultimate_attack:\n hero.ultimate()\n place.media_message(hero)\n\n\nif __name__ == '__main__':\n save_the_place(Superman(), Tokyo())\n\n print('-' * 20)\n save_the_place(SuperHero('Terminator', False), Kostroma())\n\n print('-' * 20)\n save_the_place(TexasRanger(), Moscow())\n","repo_name":"StaceyMarkiv/Interesting_tasks","sub_path":"solid_code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"38877312702","text":"'''\nHow to solve: solved it by hand\nTime complexity: O(N)\nSpace complexity: O(N)\nSolving time: exceeded the limit -> why? Hand-writing every branch of the logic took too long..\nRethink -> wouldn't it be better to insert everything first and then trim with min()?\n\nOr split the logic -> rather than arranging the full order up front when filling the dict,\nfirst store [index, play count] per song,\nthen sort by play count within each genre. That would have been more concise.\n'''\n# 1. my solution\n\nfrom collections import defaultdict\ndef solution(genres, plays):\n    length = len(genres)\n    dic = defaultdict(list)\n    set_genres = set(genres)\n    for i in set_genres:\n        dic[i] = [-1, -1, 0] # index of the top-played song, index of the runner-up, total play count\n\n    # everything goes into dic\n\n    for i in range(length):\n        this_gen = genres[i]\n        this_play = plays[i]\n        first_song_idx = dic[this_gen][0]\n        second_song_idx = dic[this_gen][1]\n\n        dic[this_gen][2] += this_play\n        # first song of this genre\n        if first_song_idx == -1 and second_song_idx == -1:\n            dic[this_gen][0] = i\n        elif first_song_idx != -1 and second_song_idx == -1:\n            if this_play > plays[first_song_idx]:\n                dic[this_gen][0], dic[this_gen][1] = i, first_song_idx\n            else: # if smaller or equal, it gets pushed back\n                dic[this_gen][1] = i\n        elif first_song_idx != -1 and second_song_idx != -1:\n            if plays[first_song_idx] > plays[second_song_idx]:\n                if this_play > plays[first_song_idx]:\n                    dic[this_gen][0], dic[this_gen][1] = i, first_song_idx\n                elif this_play == plays[first_song_idx]:\n                    dic[this_gen][1] = i\n                elif this_play < plays[first_song_idx]:\n                    if this_play > plays[second_song_idx]:\n                        dic[this_gen][1] = i\n                    else:\n                        continue\n            elif plays[first_song_idx] == plays[second_song_idx]:\n                if this_play > plays[first_song_idx]:\n                    dic[this_gen][0], dic[this_gen][1] = i, first_song_idx\n                elif this_play == plays[first_song_idx]:\n                    continue\n                elif this_play < plays[first_song_idx]:\n                    if this_play > plays[second_song_idx]:\n                        dic[this_gen][1] = i\n                    else:\n                        continue\n    dic_to_li = list(zip(dic.keys(),dic.values()))\n    dic_to_li.sort(key= lambda x: -x[1][2])\n\n    answer = []\n\n    for i in dic_to_li:\n        first = i[1][0]\n        second = i[1][1]\n        if second == -1:\n            answer.append(first)\n        else:\n            answer.append(first)\n            answer.append(second)\n\n    return answer\n\n\n# 2. Another solution: very concise, but probably not that practical.\n# Worth noting: zip, range(len(plays)) => extracting the indices\n# The lambda usage was also nice.\ndef solution(genres, plays):\n    answer = []\n    d = {e:[] for e in set(genres)}\n    for e in zip(genres, plays, range(len(plays))): # range(len(plays)): attach the index!\n        d[e[0]].append([e[1] , e[2]])\n    genreSort =sorted(list(d.keys()), key= lambda x: sum( map(lambda y: y[0],d[x])), reverse = True)\n    for g in genreSort:\n        temp = [e[1] for e in sorted(d[g],key= lambda x: (x[0], -x[1]), reverse = True)]\n        answer += temp[:min(len(temp),2)]\n    return answer\n\ngenres = [\"classic\", \"pop\", \"classic\", \"classic\", \"pop\"]\nplays = [500, 600, 150, 800, 2500]\nprint(solution(genres, plays))\n\n# 3. Another solution: code that is understandable without being overly terse. 
가장 좋은듯.\n\ndef solution(genres, plays):\n genres_dict = {}\n genres_list = []\n \n # 처음에 [인덱스, 재생횟수]별로 딕셔너리에 값 넣기\n for i in range(len(genres)):\n if genres[i] not in genres_dict:\n genres_dict[genres[i]] = []\n genres_dict[genres[i]].append([i, plays[i]])\n\n # 딕셔너리 내에서 각 장르별로 리스트 내 정렬 -> 다른 리스트에 해당 장르 전체 재생횟수 for문으로 돌려서 넣고\n for g in genres_dict:\n genres_dict[g].sort(key=lambda x: x[1], reverse=True)\n genres_list.append([g, sum([play for _, play in genres_dict[g]])])\n # 리스트 정렬 후에 \n genres_list.sort(key=lambda x: x[1], reverse=True)\n answer = []\n for g, _ in genres_list:\n answer.extend([x[0] for x in genres_dict[g][:2]])\n return answer","repo_name":"woonys/coding__test","sub_path":"programmers/고득점_kit/해시/베스트앨범.py","file_name":"베스트앨범.py","file_ext":"py","file_size_in_byte":4588,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"24448924732","text":"import time\nimport random\nfrom selenium import webdriver\nimport selenium.common.exceptions as exceptions\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom bs4 import BeautifulSoup\nfrom datetime import date\nimport pandas as pd\nimport boto3\nfrom io import StringIO\nfrom Keys import KEY_ID, SECRET_KEY\n\n\ndef connect_to_indeed():\n global driver\n options = webdriver.ChromeOptions()\n options.add_argument(\"start-maximized\")\n options.add_experimental_option(\"excludeSwitches\", [\"enable-automation\"])\n options.add_experimental_option(\"detach\", True)\n\n try:\n driver = webdriver.Chrome(options=options,\n executable_path=r\"C:\\Chrome_Driver\\chromedriver.exe\")\n except exceptions.WebDriverException:\n print(\"Chrome driver is not available.\")\n try:\n driver.get(\"https://ca.indeed.com/jobs?q=full+time&l=Canada\")\n WebDriverWait(driver, 30).until(EC.element_to_be_clickable((By.XPATH, \"//button[text()='Find jobs']\")))\n except ImportError:\n print(\"Cannot open website\")\n\n\ndef set_options():\n driver.find_element(By.ID, \"filter-dateposted\").click()\n time.sleep(1)\n driver.find_element(By.XPATH, \"//a[text()='Last 24 hours']\").click()\n time.sleep(1)\n driver.find_element(By.CSS_SELECTOR, \".popover-x-button-close.icl-CloseButton\").click()\n time.sleep(1)\n driver.find_element(By.ID, \"filter-taxo1\").click()\n time.sleep(1)\n driver.find_element(By.XPATH, \"//a[contains(text(), 'Technology Occupations')]\").click()\n\n\ndef get_page_numbers():\n temp_url = driver.current_url\n driver.get(f'{temp_url}&start=999999')\n number = driver.find_element(By.XPATH, \"//b[@aria-label]\").text\n time.sleep(3)\n driver.get(temp_url)\n time.sleep(3)\n print('Need to scrape ' + str(number) + ' pages')\n return number\n\n\ndef scrape_job_info(pages):\n job_list = []\n temp_url = driver.current_url\n for i in range(int(pages)):\n # for i in range(1):\n driver.get(f'{temp_url}&start={str(i)}0')\n soup = BeautifulSoup(driver.page_source, 'html.parser')\n for info in soup.find_all('div', class_='slider_item'):\n temp_dict = {}\n temp_dict['title'] = info.find('h2').getText().strip(\"new\")\n temp_dict['company'] = info.find('span', {'class': 'companyName'}).getText()\n if info.find('span', {'class': 'ratingNumber'}):\n temp_dict['rating'] = info.find('span', {'class': 'ratingNumber'}).getText()\n else:\n temp_dict['rating'] = 'N/A'\n if '•' in info.find('div', {'class': 'companyLocation'}).getText():\n temp_dict['location'] = info.find('div', {'class': 'companyLocation'}).getText().split('•')[0]\n temp_dict['remote'] = info.find('div', {'class': 'companyLocation'}).getText().split('•')[1]\n else:\n temp_dict['location'] = info.find('div', {'class': 'companyLocation'}).getText()\n temp_dict['remote'] = 'On-Site'\n if info.find('div', {'class': 'salary-snippet'}):\n temp_dict['salary'] = info.find('div', {'class': 'salary-snippet'}).getText()\n else:\n temp_dict['salary'] = 'N/A'\n temp_dict['date'] = str(date.today())\n job_list.append(temp_dict)\n time.sleep(random.randint(5, 15))\n return job_list\n\n\ndef data_transform(job_list):\n s3 = boto3.resource(\n service_name='s3',\n region_name='us-east-1',\n aws_access_key_id=KEY_ID,\n aws_secret_access_key=SECRET_KEY\n )\n job_df = pd.DataFrame(job_list)\n bucket = s3.Bucket('wcd-landing-zone')\n csv_buffer = StringIO()\n job_df.to_csv(csv_buffer, header=True, index=False)\n file_name 
= \"jobs_\" + str(date.today()) + \".csv\"\n s3.Object(bucket.name, file_name).put(Body=csv_buffer.getvalue())\n\n\nif __name__ == '__main__':\n connect_to_indeed()\n set_options()\n page_number = get_page_numbers()\n jobs = scrape_job_info(page_number)\n data_transform(jobs)\n","repo_name":"gaohongyang/WCD_Mid","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"73414716692","text":"import pytest\n\nfrom core.layers import TypeNamingIterator, BaseFieldsBuilder, NullFieldsBuilder, AttributeFieldsDecorator, LodFieldsDecorator, SemanticSurfaceFieldsDecorator\n\ntwo_cubes_citymodel = {\"CityObjects\":{\"id-1\":{\"geometry\":[{\"boundaries\":[[[0,1,2,3]],[[7,4,0,3]],[[4,5,1,0]],[[5,6,2,1]],[[3,2,6,7]],[[6,5,4,7]]],\"lod\":1,\"type\":\"MultiSurface\"}],\"type\":\"GenericCityObject\"}},\"type\":\"CityJSON\",\"version\":\"0.9\",\"vertices\":[[1.0,0.0,1.0],[0.0,1.0,1.0],[-1.0,0.0,1.0],[0.0,-1.0,1.0],[1.0,0.0,0.0],[0.0,1.0,0.0],[-1.0,0.0,0.0],[0.0,-1.0,0.0]],\"metadata\":{\"geographicalExtent\":[-1.0,-1.0,0.0,1.0,1.0,1.0]}}\ncitymodel_with_attributes = {\"type\":\"CityJSON\",\"version\":\"0.9\",\"CityObjects\":{\"id-1\":{\"type\":\"Building\",\"attributes\":{\"attribute1\":1,\"attribute2\":2}},\"id-2\":{\"type\":\"Building\",\"attributes\":{\"attribute1\":1,\"attribute3\":2}}}}\n\nclass TestTypeNamingIterator:\n \"\"\"A class to test the TypeNamingIterator class\"\"\"\n\n def test_list_of_layer_names(self):\n \"\"\"Tests if the amount of types found in the two cubes example CityJSON\n is identified and named properly\n \"\"\"\n type_naming_iter = TypeNamingIterator(\"two_cubes\", two_cubes_citymodel)\n\n layers = list(type_naming_iter.all_layers())\n assert len(layers) == 1\n assert layers[0] == \"two_cubes - GenericCityObject\"\n\nclass TestFieldBuilders:\n \"\"\"A class to test the field builders\"\"\"\n\n def test_base_field_builder(self):\n \"\"\"Tests that BaseFieldsBuilder builds the uid and type fields\"\"\"\n builder = BaseFieldsBuilder()\n\n fields = builder.get_fields()\n\n assert len(fields) == 2\n assert fields[0].name() == \"uid\"\n assert fields[1].name() == \"type\"\n \n def test_attributes_fields_builder(self):\n \"\"\"Tests that AttributeFieldsDecorator creates the attributes of the model\"\"\"\n builder = NullFieldsBuilder()\n builder = AttributeFieldsDecorator(builder, citymodel_with_attributes)\n\n fields = builder.get_fields()\n field_names = [field.name() for field in fields]\n\n assert len(fields) == 3\n assert \"attribute.attribute1\" in field_names\n assert \"attribute.attribute2\" in field_names\n assert \"attribute.attribute3\" in field_names\n \n def test_lod_fields_builder(self):\n \"\"\"Tests that LodFieldsBuilder create the lod field\"\"\"\n builder = NullFieldsBuilder()\n builder = LodFieldsDecorator(builder)\n\n fields = builder.get_fields()\n\n assert len(fields) == 1\n assert fields[0].name() == \"lod\"\n \n def test_semantic_surface_fields_builder(self):\n \"\"\"Tests that LodFieldsBuilder create the lod field\"\"\"\n builder = NullFieldsBuilder()\n builder = SemanticSurfaceFieldsDecorator(builder)\n\n fields = builder.get_fields()\n\n assert len(fields) == 1\n assert fields[0].name() == \"semantic_surface\"\n","repo_name":"cityjson/cityjson-qgis-plugin","sub_path":"tests/test_layers.py","file_name":"test_layers.py","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"67"}
+{"seq_id":"3378695561","text":"cartItems = []\n# print(cartItems)\ncartPrices = []\n# print(cartPrices)\n\n\ndef add_to_cart(items, cartItems, cartPrices):\n userItem = input(\"Please enter an item to add to your cart\")\n try: # try except block to handle incorrect user input\n # print(items[userItem])\n cartItems.append(userItem) # adds user input along with the price\n # print(cartItems)\n cartPrices.append(items[userItem])\n # print(cartPrices)\n except KeyError:\n # error validation for if the user gives an input that does not match the item list\n print(\"Not found\")\n","repo_name":"RakhinAmin/Shopping-Cart-Python","sub_path":"ShoppingCartPython/addToCart.py","file_name":"addToCart.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"26682722631","text":"import os, sys, math, time\nimport numpy as np\nfrom collections import Counter\n\nsys.path.append(\"../IAD-Generator/iad-generation/\")\nfrom csv_utils import read_csv\n\nfrom sklearn import metrics\nfrom sklearn.linear_model import SGDClassifier\n\nimport scipy\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom itr_sklearn import ITR_Extractor\n\nimport itr_parser\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.preprocessing import MinMaxScaler\n\nfrom multiprocessing import Pool, Process\n\nfrom sklearn.pipeline import Pipeline\n\nif (sys.version[0] == '2'):\n\timport cPickle as pickle\nelse:\n\timport pickle\n\n\n#### EVENT TO ITR #####\n\ndef extract_wrapper(ex):\n\t''' extract the ITRs from a single event binary file. The output is saved to the\n\tsp_path directory. '''\n\n\t#print(\"begin extract\")\n\tout = itr_parser.extract_itr_seq_into_counts(ex['b_path'])\n\t#print(\"end extract\")\n\tprint(\"out:\", out.shape)\n\tout = out.reshape(-1).astype(np.uint8)\n\tprint(\"out:\", out.shape)\n\n\tnp.save(ex['sp_path'], out)\n\n\treturn ex['sp_path']\n\ndef convert_event_to_itr(csv_contents, num_procs=1, empty_locs=[]):\n\t''' convert a binary event file to a list of ITRs. This fnuction is done\n\tvie multiple concurrent process calls to \"extract_wrapper\" '''\n\n\tt_s = time.time()\n\tpool = Pool(num_procs)\n\n\tprint(\"convert_event_to_itr: \", len(csv_contents))\n\tfor i, c in enumerate(pool.imap_unordered( extract_wrapper, csv_contents, chunksize=10 )):\n\t\tif(i % 10 == 0):\n\t\t\tprint(\"elapsed time {0}: {1}\".format(i, time.time()-t_s))\n\tpool.close()\n\tpool.join()\n\n#### PRE-PROCESS ITR #####\n\ndef tfidf_and_scale(ex_list):\n\t''' extract the ITRs from a single event binary file. The output is saved to the\n\tsp_path directory. '''\n\tprint(\"len(ex_list):\", len(ex_list))\n\n\n\ttfidf = pickle.load(open(\"tfidf\"+'.pk', \"rb\"))\n\tscaler = pickle.load(open(\"scaler\"+'.pk', \"rb\"))\n\n\t# open ex as sparse format\n\tfor i, ex in enumerate(ex_list):\n\t\t#print(ex[\"example_id\"])\n\t\tif(i % 1000 == 0):\n\t\t\tprint(\"elapsed time {0}: {1}\".format(i, len(ex_list)))\n\n\t\tdata = np.load(ex['sp_path'])\n\n\t\tidx = np.nonzero(data)[0]\n\t\tvalue = data[idx]\n\n\t\tdata = zip(idx, value)\n\n\t\t# Apply pre-processing to sparse data\n\t\tdata = tfidf.transform(data)\n\n\t\t# format data as dense\n\t\tunzipped_data = np.array(zip(*(data[0])))\t\t\n\t\tdata = np.zeros(128*128*7)\n\t\tdata[unzipped_data[0].astype(np.int32)] = unzipped_data[1]\n\t\tdata = data.reshape(1, -1)\n\n\t\t# Apply pre-processing to dense data\n\t\tdata = scaler.transform(data)\n\n\t\tdata = data.reshape(-1)\n\n\t\t#save data as a sparse matrix for efficiency? \n\t\tdata = scipy.sparse.coo_matrix(np.array(data))\n\t\tscipy.sparse.save_npz(ex['pp_path'], data)\n\n\t#return ex['pp_path']\n\ndef pre_process_itr(csv_contents, num_procs=1, empty_locs=[]):\n\t''' convert a binary event file to a list of ITRs. 
This fnuction is done\n\tvie multiple concurrent process calls to \"extract_wrapper\" '''\n\n\tt_s = time.time()\n\n\tchunk_size = len(csv_contents)/float(num_procs)\n\tchunk_size = int(math.ceil(chunk_size))\n\n\t#print(\"chunk_size:\", chunk_size)\n\n\tprocs = []\n\tfor i in range(num_procs):\n\t\tchunk = csv_contents[i*chunk_size:i*chunk_size+chunk_size]\n\t\tp = Process(target=tfidf_and_scale, args=(chunk,))\n\t\tp.start()\n\n\tfor i in range(num_procs):\n\t\tp.join()\n\t\n\n\t'''\n\tpool = Pool(num_procs)\n\tfor i, c in enumerate(pool.imap_unordered( tfidf_and_scale, csv_contents, chunksize=10 )):\n\t\tif(i % 1000 == 0):\n\t\t\tprint(\"elapsed time {0}: {1}\".format(i, time.time()-t_s))\n\tpool.close()\n\tpool.join()\n\t'''\n\n#### FILE I/O ####\n\ndef get_filenames(dataset_dir, model_type, dataset_type, dataset_id, layer):\n\tfile_path = os.path.join(dataset_dir, 'b_{0}_{1}_{2}'.format(model_type, dataset_type, dataset_id))\n\t\n\ttrain_filename = os.path.join(file_path, 'train_{0}_{1}.npz'.format(dataset_id, layer))\n\ttest_filename = os.path.join(file_path, 'test_{0}_{1}.npz'.format(dataset_id, layer))\n\ttrain_label_filename = os.path.join(file_path, 'train_label_{0}_{1}.npy'.format(dataset_id, layer))\n\ttest_label_filename = os.path.join(file_path, 'test_label_{0}_{1}.npy'.format(dataset_id, layer))\n\t\n\treturn train_filename, test_filename, train_label_filename, test_label_filename\n\ndef retrieve_data(dataset_dir, model_type, dataset_type, dataset_id, layer):\n\tprint(\"Retrieving file data!\")\n\ttrain_filename, test_filename, train_label_filename, test_label_filename = get_filenames(dataset_dir, model_type, dataset_type, dataset_id, layer)\n\n\tdata_in = scipy.sparse.load_npz(train_filename)\n\tdata_label = np.load(train_label_filename)\n\n\teval_in = scipy.sparse.load_npz(test_filename)\n\teval_label = np.load(test_label_filename)\n\n\treturn data_in, data_label, eval_in, eval_label\n\n\ndef process_data(dataset_dir, model_type, dataset_type, dataset_id, #layer, \n\tcsv_filename, num_classes, num_procs):\n\tprint(\"Generating new files!\")\n\t\t\n\t#open files\n\ttry:\n\t\tcsv_contents = read_csv(csv_filename)\n\texcept:\n\t\tprint(\"ERROR: Cannot open CSV file: \"+ csv_filename)\n\n\tb_dir_name = os.path.join(dataset_dir, 'b_{0}_{1}_{2}'.format(model_type, dataset_type, dataset_id))\n\tsp_dir_name = os.path.join(dataset_dir, 'sp_{0}_{1}_{2}'.format(model_type, dataset_type, dataset_id))\n\tpp_dir_name = os.path.join(dataset_dir, 'pp_{0}_{1}_{2}'.format(model_type, dataset_type, dataset_id))\n\t\n\tif(not os.path.exists(sp_dir_name)):\n\t\tos.makedirs(sp_dir_name)\n\tif(not os.path.exists(pp_dir_name)):\n\t\tos.makedirs(pp_dir_name)\n\n\tprint(\"Organizing csv_contents\")\n\tfor ex in csv_contents:\n\t\tex['b_path'] = os.path.join(b_dir_name, '{0}.b'.format(ex['example_id']))\n\t\tex['sp_path'] = os.path.join(sp_dir_name, '{0}.npy'.format(ex['example_id']))\n\t\tex['pp_path'] = os.path.join(pp_dir_name, '{0}.npz'.format(ex['example_id']))\n\t\t\n\t\t'''\n\t\tex['b_path'] = os.path.join(b_dir_name, '{0}_{1}.b'.format(ex['example_id'], layer))\n\t\tex['sp_path'] = os.path.join(sp_dir_name, '{0}_{1}.npy'.format(ex['example_id'], layer))\n\t\tex['pp_path'] = os.path.join(pp_dir_name, '{0}_{1}.npz'.format(ex['example_id'], layer))\n\t\t'''\n\n\tdataset = [ex for ex in csv_contents if ex['label'] < num_classes]\n\t#print(\"dataset_length:\", len(dataset), len([x for x in os.listdir(sp_dir_name) if \"_3.\" in x]))\n\tprint(\"dataset_length:\", len(dataset), 
len(os.listdir(pp_dir_name)))\n\n\n\t#dataset = dataset[:41]\n\n\t# CONVERT BINARY EVENTS TO ITRS\n\tconvert_event_to_itr(dataset, num_procs=num_procs)\n\n\t# PRE-PROCESS ITRS\n\t#pre_process_itr(dataset, num_procs=num_procs)\n\t\n\n\n\n\n\nif __name__ == '__main__':\n\timport argparse\n\tparser = argparse.ArgumentParser(description='Generate IADs from input files')\n\t#required command line args\n\tparser.add_argument('model_type', help='the type of model to use', choices=['i3d', 'rn50', 'trn', 'tsm'])\n\n\tparser.add_argument('dataset_dir', help='the directory where the dataset is located')\n\tparser.add_argument('csv_filename', help='a csv file denoting the files in the dataset')\n\tparser.add_argument('dataset_type', help='the dataset type', choices=['frames', 'flow', 'both'])\n\tparser.add_argument('dataset_id', type=int, help='a csv file denoting the files in the dataset')\n\tparser.add_argument('num_classes', type=int, help='the number of classes in the dataset')\n\n\tparser.add_argument('--num_procs', type=int, default=1, help='number of process to split IAD generation over')\n\tparser.add_argument('--repeat', type=int, default=1, help='number of times to repeat training the model')\n\tparser.add_argument('--parse_data', type=bool, default=True, help='whether to parse the data again or load from file')\n\n\n\tFLAGS = parser.parse_args()\n\n\tif(FLAGS.model_type == 'i3d'):\n\t\tfrom gi3d_wrapper import DEPTH_SIZE, CNN_FEATURE_COUNT\n\tif(FLAGS.model_type == 'rn50'):\n\t\tfrom rn50_wrapper import DEPTH_SIZE, CNN_FEATURE_COUNT\n\tif(FLAGS.model_type == 'trn'):\n\t\tfrom trn_wrapper import DEPTH_SIZE, CNN_FEATURE_COUNT\n\tif(FLAGS.model_type == 'tsm'):\n\t\tfrom tsm_wrapper3 import DEPTH_SIZE, CNN_FEATURE_COUNT\n\n\tlayer = 0#DEPTH_SIZE-1\n\n\tprocess_data(FLAGS.dataset_dir, \n\t\t\tFLAGS.model_type, \n\t\t\tFLAGS.dataset_type, \n\t\t\tFLAGS.dataset_id, \n\t\t\t#layer, \n\t\t\tFLAGS.csv_filename, \n\t\t\tFLAGS.num_classes,\n\t\t\tFLAGS.num_procs)\n\t'''\n\tfor layer in range(DEPTH_SIZE):\n\t\tmain(FLAGS.dataset_dir, \n\t\t\tFLAGS.model_type, \n\t\t\tFLAGS.dataset_type, \n\t\t\tFLAGS.dataset_id, \n\t\t\tlayer, \n\t\t\tFLAGS.csv_filename, \n\t\t\tFLAGS.num_classes,\n\t\t\tFLAGS.num_procs)\n\t'''\n\t\n","repo_name":"AssistiveRoboticsUNH/TCG-Classifier","sub_path":"itr_process.py","file_name":"itr_process.py","file_ext":"py","file_size_in_byte":8207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"26066042732","text":"#!/usr/bin/env python3\nimport sys\nfrom collections.abc import Iterable\nfrom math import *\nfrom itertools import *\nfrom collections import *\nfrom functools import *\nfrom operator import *\ntry:\n from math import gcd\nexcept Exception:\n from fractions import gcd\n\n\ndef solve(N: int, w: \"List[int]\", s: \"List[int]\", v: \"List[int]\"):\n return 0\n\n\ndef main():\n def iterate_tokens():\n for line in sys.stdin:\n for word in line.split():\n yield word\n tokens = iterate_tokens()\n N = int(next(tokens)) # type: int\n w = [int()] * (N) # type: \"List[int]\" \n s = [int()] * (N) # type: \"List[int]\" \n v = [int()] * (N) # type: \"List[int]\" \n for i in range(N):\n w[i] = int(next(tokens))\n s[i] = int(next(tokens))\n v[i] = int(next(tokens))\n result = solve(N, w, s, v)\n if isinstance(result, Iterable) and not isinstance(result, str):\n result = '\\n'.join([str(v) for v in result])\n print(result)\n\nif __name__ == '__main__':\n main()\n","repo_name":"ar90n/lab","sub_path":"contest/atcoder/dp/X/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
+{"seq_id":"174978431","text":"import math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport datetime\n\ndef criteria_bar_plotter(RNN_result: list, KNN_result: list, LOCF_result: list, Hot_deck_result: list, criteria: str,\n feature: str, gap_indicators: list):\n if criteria == \"Mean squared error\":\n KNN_result = list(math.sqrt(i) for i in KNN_result)\n RNN_result = list(math.sqrt(i) for i in RNN_result)\n LOCF_result = list(math.sqrt(i) for i in LOCF_result)\n Hot_deck_result = list(math.sqrt(i) for i in Hot_deck_result)\n criteria = criteria.replace(criteria, \"Root \" + criteria)\n if criteria == \"Variance error\":\n KNN_result = list(np.abs(i) for i in KNN_result)\n RNN_result = list(np.abs(i) for i in RNN_result)\n LOCF_result = list(np.abs(i) for i in LOCF_result)\n Hot_deck_result = list(np.abs(i) for i in Hot_deck_result)\n criteria = criteria.replace(criteria, \"Absolute \" + criteria)\n\n x = np.arange(len(gap_indicators))\n fig, ax = plt.subplots(figsize=(12, 8))\n bar_width = 0.25\n\n bar1 = x\n bar2 = [i + bar_width for i in bar1]\n bar3 = [i + bar_width for i in bar2]\n bar4 = [i + bar_width for i in bar3]\n\n method_1 = ax.bar(bar1, KNN_result, bar_width, label='KNN', hatch='/')\n method_2 = ax.bar(bar2, LOCF_result, bar_width, label='LOCF', hatch='\\\\')\n method_3 = ax.bar(bar3, RNN_result, bar_width, label='RNN', hatch='|')\n method_4 = ax.bar(bar4, Hot_deck_result, bar_width, label='Hot Deck', hatch='-')\n\n ax.set_ylabel(criteria + ' scores')\n ax.set_title(criteria + ' score per gap size ' + feature)\n ax.set_xticks(x, gap_indicators)\n ax.legend()\n\n ax.bar_label(method_1, padding=3)\n ax.bar_label(method_2, padding=3)\n ax.bar_label(method_3, padding=3)\n ax.bar_label(method_4, padding=3)\n\n fig.tight_layout()\n plt.grid(visible=True, axis='y')\n time = datetime.datetime.now()\n plt.savefig(\n criteria + '_per_gap_' + feature + \"_\" + f\"{time.day}_{time.hour}_{time.minute}_{time.second}\" + \".png\")\n plt.show()","repo_name":"Sumcloud/Portfolio_Applied_Data_Science","sub_path":"Project Notebooks/Visuals/criteria_grouped_bar_plotter.py","file_name":"criteria_grouped_bar_plotter.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"5192966296","text":"import json\nfrom polly import text_to_voice\n\n# ====== read sentences from JSON\nsentences = json.load(open('src/app/config/rs.json'))\nprint(len(sentences))\n\n# ====== text to voice ======\n\n\ndef save_voice(number, response_list):\n result = []\n for idx, response in enumerate(response_list):\n data = response.get('AudioStream').read()\n pathname = str(number) + '_' + str(idx) + '.mp3'\n filename = 'data/rs/' + pathname\n f = open(filename, 'wb')\n f.write(data)\n f.close()\n print('built: ' + filename)\n result.append(pathname)\n return result\n\nrs_result = []\n\nfor index, sentence in enumerate(sentences):\n response_list = text_to_voice(str(sentence))\n pathnames = save_voice(index, response_list)\n sent_ret = dict(\n sentence=sentence,\n audioURLs=pathnames\n )\n rs_result.append(sent_ret)\n print(sent_ret)\n\nwith open('data/rs/data.json', 'w') as outfile:\n json.dump(rs_result, outfile)\n","repo_name":"neekey/www.listenpte.com","sub_path":"opt/run_rs.py","file_name":"run_rs.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"39131614735","text":"from konlpy.tag import Okt\nfrom collections import Counter\nfrom docx import Document\nfrom docx.enum.text import WD_COLOR_INDEX\nimport pandas as pd\nimport sys\nimport copy\nimport re\n\n\n\ndef get_tags(text, ntags=10):\n spliter = Okt()\n # konlpy의 Twitter객체\n nouns = spliter.nouns(text)\n # nouns 함수를 통해서 text에서 명사만 분리/추출\n count = Counter(nouns)\n # Counter객체를 생성하고 참조변수 nouns할당\n return_list = [] # 명사 빈도수 저장할 변수\n for n, c in count.most_common(ntags):\n temp = {'tag': n, 'count': c}\n if len(temp['tag'])>=2 :\n return_list.append(temp)\n # most_common 메소드는 정수를 입력받아 객체 안의 명사중 빈도수\n # 큰 명사부터 순서대로 입력받은 정수 갯수만큼 저장되어있는 객체 반환\n # 명사와 사용된 갯수를 return_list에 저장합니다.\n return return_list\n\n\ndef main():\n text_file_name ='/Users/eunsung/PycharmProjects/ProjectFile/add.txt'\n # 분석할 파일\n noun_count = 10\n # 최대 많은 빈도수 부터 10개 명사 추출\n output_file_name = '/Users/eunsung/PycharmProjects/ProjectFile/counter.txt'\n # counter.txt 에 저장\n open_text_file = open(text_file_name, 'r', -1, \"utf-8\")\n # 분석할 파일을 open\n text = open_text_file.read() # 파일을 읽습니다.\n tags = get_tags(text, noun_count) # get_tags 함수 실행\n open_text_file.close() # 파일 close\n open_output_file = open(output_file_name, 'w', -1, \"utf-8\")\n # 결과로 쓰일 count.txt 열기\n for tag in tags:\n noun = tag['tag']\n count = tag['count']\n open_output_file.write('{} {}\\n'.format(noun, count))\n # 결과 저장\n open_output_file.close()\n\n\n\n\ndef highlight_text(filename, find):\n doc = Document(filename)\n for paragraph in doc.paragraphs:\n if find in paragraph.text:\n for run in paragraph.runs:\n if find in run.text:\n x = run.text.split(find)\n run.clear()\n for i in range(len(x) - 1):\n run.add_text(x[i])\n run.add_text(find)\n run.font.highlight_color = WD_COLOR_INDEX.YELLOW\n doc.save('/Users/eunsung/PycharmProjects/ProjectFile/blog8.docx')\n return 1\n\ndef highlight2(filename, find):\n\n source = filename\n phrase = find\n\n doc = Document(source)\n\n for para in doc.paragraphs :\n start = para.text.find(phrase)\n if start > -1 :\n pre = para.text[:start]\n post = para.text[start+len(phrase):]\n para.text = pre\n para.add_run(phrase)\n para.runs[1].font.highlight_color = WD_COLOR_INDEX.YELLOW\n para.add_run(post)\n\n doc.save('/Users/eunsung/PycharmProjects/ProjectFile/blog7.docx')\n\ndef Highlighting(infiledir, keyword):\n doc = Document(infiledir)\n #docx 파일을 불러옴.\n p1_text = doc.paragraphs[0].text\n doc.paragraphs[0].clear()\n p2 = doc.add_paragraph()\n substrings = p1_text.split(keyword)\n #keyword 위치를 찾아 highlighting시켜서 다시씀.\n for substring in substrings[:-1]:\n p2.add_run(substring)\n font = p2.add_run(keyword).font\n font.highlight_color = WD_COLOR_INDEX.YELLOW\n p2.add_run(substrings[-1])\n doc.save(infiledir)\n\n\ndef newHilighting(docxFileName,list):\n doc = Document(docxFileName)\n\n\n for paragraph in doc.paragraphs:\n for target in list:\n if target in paragraph.text: # it is worth checking in detail ...\n\n currRuns = copy.copy(paragraph.runs) # deep copy as we delete/clear the object\n paragraph.clear()\n\n for run in currRuns:\n if target in run.text:\n words = re.split('(\\W)', run.text) # split into words in order to be able to color only one\n for word in words:\n if word == target:\n newRun = paragraph.add_run(word)\n newRun.font.highlight_color = WD_COLOR_INDEX.PINK\n else:\n newRun = paragraph.add_run(word)\n newRun.font.highlight_color = None\n else: # our target is not in it so we add it unchanged\n paragraph.runs.append(run)\n\n doc.save('output.docx')\n\n\nif __name__ == '__main__':\n 
main()\n\n\n\n\n","repo_name":"soonmyeong2/tired","sub_path":"src/text analysis/TextSummery.py","file_name":"TextSummery.py","file_ext":"py","file_size_in_byte":4551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"449511760","text":"import numpy\nimport numpy.matlib\nimport time\nfrom ReID_net.Log import log\n\ndef do_cmc_validation(engine,network,data):\n m = data.num_test_id\n n = m * m\n idx_placeholder = data.idx_placeholder\n batch_size = network.batch_size\n debug = network.tags\n path = data.test_case\n end_net = data.use_end_network\n rank_out = \"\"\n errors = \"\"\n measures = {}\n merge_type = engine.config.str(\"merge_type\", \"\")\n\n out_layer_name = engine.config.str(\"output_embedding_layer\",\"fc1\")\n out_layer = network.tower_layers[0][out_layer_name]\n assert len(out_layer.outputs) == 1\n out_feature = out_layer.outputs[0]\n out_feature_size = out_layer.n_features\n\n test_cases = engine.config.unicode_list(\"test_cases\", [])\n\n for test_case in test_cases:\n errs = 0\n y_vals = numpy.empty([0,1])\n probe = numpy.empty([0, out_feature_size])\n gallery = numpy.empty([0, out_feature_size])\n\n idx = 0\n while idx < m:\n start = time.time()\n idx_value = [idx, min(idx + batch_size, m),1,0]\n\n feature_val, msg = engine.session.run([out_feature, debug],\n feed_dict={idx_placeholder: idx_value, path: test_case, end_net: False})\n probe = numpy.concatenate((probe, feature_val), axis=0)\n\n end = time.time()\n elapsed = end - start\n print(min(idx + batch_size, m), '/', m, \"elapsed\", elapsed, file=log.v5)\n idx += batch_size\n\n idx = 0\n while idx < m:\n start = time.time()\n idx_value = [idx, min(idx + batch_size, m), 1, 1]\n\n feature_val, msg = engine.session.run([out_feature, debug],\n feed_dict={idx_placeholder: idx_value, path: test_case, end_net: False})\n gallery = numpy.concatenate((gallery, feature_val), axis=0)\n\n end = time.time()\n elapsed = end - start\n print(min(idx + batch_size, m), '/', m, \"elapsed\", elapsed, file=log.v5)\n idx += batch_size\n\n start = time.time()\n for pdx in range(m):\n idx = 0\n while idx < m:\n idx_value = [idx, min(idx + batch_size, m), pdx, 1]\n r = numpy.arange(idx_value[0], idx_value[1])\n q = (pdx,) * (min(idx + batch_size, m) - idx)\n\n if data.validation_mode == \"similarity\":\n\n y = network.y_softmax\n e = network.measures_accumulated\n in_layer_name = engine.config.str(\"input_embedding_layer\", \"siam_concat\")\n in_layer = network.tower_layers[0][in_layer_name]\n assert len(in_layer.outputs) == 1\n in_feature = in_layer.outputs[0]\n\n if merge_type == \"add\":\n feature_val = probe[q, :] + gallery[r, :]\n elif merge_type == \"subtract\":\n feature_val = probe[q, :] - gallery[r, :]\n elif merge_type == \"abs_subtract\":\n feature_val = numpy.abs(probe[q, :] - gallery[r, :])\n else: # merge_type == \"concat\":\n feature_val = numpy.concatenate((probe[q, :], gallery[r, :]), axis=1)\n\n y_val, err = engine.session.run([y, e], feed_dict={idx_placeholder: idx_value, in_feature: feature_val, end_net: True,path: test_case})\n y_val = y_val[:,0:1]\n errs += err[\"errors\"]\n\n else: # data.validation_mode == \"embedding\":\n y_val = numpy.linalg.norm(probe[q,:] - gallery[r,:],axis=1)\n y_val = numpy.reshape(y_val,[y_val.size,1])\n\n y_vals = numpy.concatenate((y_vals, y_val), axis=0)\n idx += batch_size\n\n y_vals1 = y_vals\n Apsum = 0\n ranks = numpy.zeros(m)\n for i in range(m):\n r = numpy.arange(m * i, m * (i + 1))\n I = numpy.identity(m)\n corr = I[:, i]\n tab = numpy.column_stack((y_vals1[r], corr))\n id = numpy.argsort(y_vals1[r], axis=0)\n tab = tab[id, :]\n pos = numpy.where(tab[:,0, 1])[0]\n ranks[i] = pos[0] + 1\n Ap = numpy.zeros(1)\n f = numpy.zeros(1)\n for j in range(pos.size):\n f += 1\n Ap += f / (pos[j] + 
1)\n\n Apsum += Ap\n\n mAp = Apsum / m\n cmc = numpy.zeros(m)\n for i in range(m):\n cmc[i] = 100 / m * ranks[ranks <= i + 1].size\n\n rank1 = cmc[0]\n rank5 = cmc[4]\n rank10 = cmc[9]\n error = errs / n\n\n errors += \"%.3f \" % mAp\n rank_out += \"%.1f \" % rank1 + \"%.1f \" % rank5 + \"%.1f / \" % rank10\n\n measures = {}\n measures[\"ranks\"] = rank_out\n\n end = time.time()\n elapsed = end - start\n print(test_case, \"elapsed\", elapsed, file=log.v5)\n\n return errors, measures","repo_name":"JonathonLuiten/PReMVOS","sub_path":"code/ReID_net/Forwarding/CMC_Validator.py","file_name":"CMC_Validator.py","file_ext":"py","file_size_in_byte":4411,"program_lang":"python","lang":"en","doc_type":"code","stars":127,"dataset":"github-code","pt":"72"}
+{"seq_id":"10025123495","text":"import numpy as np\nfrom lenspyx.remapping.utils_geom import Geom\nfrom delensalot.core.opfilt import tmodes_ninv, ebmodes_ninv\nfrom psutil import cpu_count\n\nlmax = 20\nnside = 256\nthread = cpu_count(logical=False)\n\ngeom = Geom.get_healpix_geometry(nside)\ntpl = tmodes_ninv.template_tfilt(lmax, geom, sht_threads=thread)\n\nNiT = np.ones(geom.npix(), dtype=float)\nmT = tpl.build_tnit(NiT)\nprint('mT shape', mT.shape)\nprint(np.diag(mT)[:20])\n\ntpl = ebmodes_ninv.template_ebfilt(lmax, geom, sht_threads=thread)\n\nNiT = np.ones(geom.npix(), dtype=float)\nmP = tpl.build_tnit(NiT)\n\nprint('mP shape', mP.shape)\nprint(np.diag(mP)[:20])\n\n","repo_name":"NextGenCMB/delensalot","sub_path":"tests/old/test_modes_ninv.py","file_name":"test_modes_ninv.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"}
+{"seq_id":"5544048868","text":"import logging\nimport os\ntry:\n from pathlib2 import Path\nexcept ImportError:\n from pathlib import Path\n\nimport numpy as np\nimport torch\nimport torch.multiprocessing as mp\nimport parmap\nimport spikeglx\n\nfrom detect.detector import Detect\nfrom localization_pipeline.denoiser import Denoise\n\nfrom detect.deduplication import deduplicate_gpu, deduplicate\n\nfrom scipy.signal import argrelmin\n\nfrom detect.run import run\nimport os\nimport numpy as np\nfrom tqdm import tqdm\n# from residual import RESIDUAL\nfrom localization_pipeline.localizer import LOCALIZER\nfrom localization_pipeline.merge_results import get_merged_arrays\n## =============================================\n\ngeom_path =\"/active/ramirez_j/ramirezlab/nbush/helpers/spike_localization_registration_hpc/channels_maps/np1_channel_map.npy\"\npath_nn_detector = \"/active/ramirez_j/ramirezlab/nbush/helpers/spike_localization_registration_hpc/pretrained_detector/detect_np1.pt\"\npath_nn_denoiser = \"/active/ramirez_j/ramirezlab/nbush/helpers/spike_localization_registration_hpc/pretrained_denoiser/denoise.pt\"\nstandardized_path = '/archive/ramirez_j/ramirezlab/nbush/projects/dynaresp/data/ibl_pipeline_test/m2021-32_g0_t0.imec0.ap.standardized.bin'\nstandardized_meta = standardized_path.replace('.bin','.meta')\nstandardized_dtype = 'float32'\nstandardized_path = Path(standardized_path)\nmeta = spikeglx.read_meta_data(Path(standardized_meta))\nsampling_rate = float(meta['imSampRate'])\nlen_recording = int(meta['fileTimeSecs'])\nlen_recording = 1\ndetection_directory = standardized_path.parent.joinpath('detection_results_threshold')\nprint(standardized_path.stem)\ngeom_array = np.load(geom_path)\napply_nn = True ### If set to false, run voltage threshold instead of NN detector\nspatial_radius = 70\nn_sec_chunk = 1\nn_processors = 1\nn_sec_chunk_gpu_detect = .1\ndetect_threshold = 0.5 ## 0.5 if apply NN, 4/5/6 otherwise\nn_filters_detect = [16, 8, 8]\nspike_size_nn = 121 ### In sample steps\nn_filters_denoise = [16, 8, 4]\nfilter_sizes_denoise = [5, 11, 21]\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = str(0)\n\nn_batches = len_recording//n_sec_chunk\n\nprint('Running detect')\nrun(standardized_path, standardized_dtype, detection_directory, geom_array, spatial_radius, apply_nn, n_sec_chunk, n_batches, n_processors, n_sec_chunk_gpu_detect, sampling_rate, len_recording,\n detect_threshold, path_nn_detector, n_filters_detect, spike_size_nn, path_nn_denoiser, n_filters_denoise, filter_sizes_denoise, run_chunk_sec='full')\n\n#\n# ===================================\nprint('Running localize')\nbin_file = standardized_path\nresidual_file = bin_file\n\nfname_spike_train = detection_directory.joinpath('spike_index.npy')\n# Sort spike train if not\nspt_array = np.load(fname_spike_train)\nspt_array = spt_array[spt_array[:, 0].argsort()]\nnp.save(fname_spike_train, spt_array)\n\nn_channels = geom_array.shape[0]\n\ndenoiser_weights = path_nn_denoiser\ndenoiser_min = 42 ## Goes with the weights\n\n\nfname_templates = None\n\nlocalizer_obj = LOCALIZER(bin_file, standardized_dtype, fname_spike_train, fname_templates, geom_path, denoiser_weights, denoiser_min,n_processors=n_processors)\n# localizer_obj.get_offsets()\n# localizer_obj.compute_aligned_templates()\nlocalizer_obj.load_denoiser()\nlocalize_dir = standardized_path.parent.joinpath('position_results')\nif not os.path.exists(localize_dir):\n os.makedirs(localize_dir)\nfor i in tqdm(range(n_batches)):\n localizer_obj.get_estimate(i, threshold = 
detect_threshold, output_directory =localize_dir)","repo_name":"nbush257/npx_utils","sub_path":"scripts/run_modified_ibl.py","file_name":"run_modified_ibl.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"171260754","text":"import numpy as np\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nfrom projectWk2 import play_strategic_game\r\n\r\ndef main():\r\n sns.set()\r\n results = []\r\n for _ in range(1000):\r\n results.append(play_strategic_game())\r\n result_pie = [results.count(i) for i in range(-1, 3)]\r\n print(result_pie)\r\n plt.pie(result_pie, labels=[-1, 0, 1, 2])\r\n plt.show()\r\n #plt.savefig(\"tictactoe.pdf\")\r\n\r\nmain()","repo_name":"Yodeman/Python_for_Research","sub_path":"pieTicTacToe.py","file_name":"pieTicTacToe.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"7503076336","text":"from collections import namedtuple\n\nfrom Vintageous.tests import ViewTest\nfrom Vintageous.vi.utils import modes\n\ntest_data = namedtuple('test_data', 'text startRegion mode count expectedRegion msg')\n\nNORMAL_CASES = (\n test_data(' 2a4', (3, 3), modes.NORMAL, 1, (2, 2), 'Move to first non-space left'),\n test_data(' 234', (1, 1), modes.NORMAL, 1, (2, 2), 'Move to first non-space right'),\n)\n\nINTERNAL_NORMAL_CASES = (\n # Test cases for 'c' behavior, 'd' behaves differently\n test_data(' 12\\n 56', (0, 0), modes.INTERNAL_NORMAL, 1, (0, 4), 'Internal before first space'),\n test_data(' 12\\n 56', (2, 2), modes.INTERNAL_NORMAL, 1, (0, 4), 'Internal after first space'),\n test_data(' 12\\n 56', (6, 6), modes.INTERNAL_NORMAL, 1, (4, 7), 'Internal from 2nd line'),\n)\n\nVISUAL_MULTI_CHAR_CASES = (\n test_data(' 2ba5', (5, 3), modes.VISUAL, 1, (5, 2), 'Visual first non-space right no crossover'),\n test_data(' 2ab5', (3, 5), modes.VISUAL, 1, (4, 2), 'Visual first non-space right crossover'),\n test_data(' 2345', (2, 0), modes.VISUAL, 1, (1, 3), 'Visual first non-space left crossover'),\n test_data(' 2345', (0, 2), modes.VISUAL, 1, (0, 3), 'Visual first non-space left no crossover'),\n test_data(' 23b5', (1, 5), modes.VISUAL, 1, (1, 3), 'Visual first non-space forward'),\n test_data(' 23a5', (5, 1), modes.VISUAL, 1, (5, 2), 'Visual first non-space reverse'),\n)\n\nVISUAL_ONE_CHAR_CASES = (\n test_data('f', (0, 1), modes.VISUAL, 1, (0, 1), 'Visual single character forward'),\n test_data('r', (1, 0), modes.VISUAL, 1, (1, 0), 'Visual single character reverse'),\n)\n\nVISUAL_MULTI_LINE_CASES = (\n test_data(' 123\\n 678', (0, 5), modes.VISUAL, 1, (0, 2), 'Visual caret on newline'),\n test_data(' 123\\n 678', (8, 4), modes.VISUAL, 1, (8, 1), 'Visual caret on newline reverse'),\n test_data(' 123\\n 678', (2, 8), modes.VISUAL, 1, (2, 7), 'Visual forward multiline'),\n test_data(' 123\\n 678', (8, 2), modes.VISUAL, 1, (8, 1), 'Visual reverse multiline'),\n)\n\nMULTI_COUNT_NORMAL_CASES = (\n test_data(' 123\\n 678', (0, 0), modes.NORMAL, 2, (6, 6), 'Normal count 2 move right'),\n test_data(' 123\\n 678', (2, 2), modes.NORMAL, 2, (6, 6), 'Normal count 2 move left'),\n test_data(' 123\\n 678', (0, 0), modes.NORMAL, 3, (6, 6), 'Normal count 3 with only 2 lines'),\n)\n\nMULTI_COUNT_INTERNAL_NORMAL_CASES = (\n # Test cases for 'c' behavior, 'd' behaves differently\n test_data(' 123\\n 678\\n bcd', (2, 2), modes.INTERNAL_NORMAL, 2, (0, 10), 'Internal count 2'),\n test_data(' 123\\n 678\\n bcd', (7, 7), modes.INTERNAL_NORMAL, 3, (5, 14), 'Internal over count'),\n)\n\nMULTI_COUNT_VISUAL_CASES = (\n test_data(' 123\\n 678', (0, 3), modes.VISUAL, 2, (0, 7), 'Visual count 2 no crossover'),\n test_data(' 123\\n 678', (3, 0), modes.VISUAL, 2, (2, 7), 'Visual count 2 crossover'),\n test_data(' 123\\n 678', (0, 3), modes.VISUAL, 3, (0, 7), 'Visual count 3 with only 2 lines'),\n)\n\nclass Test_vi_underscore(ViewTest):\n def runTests(self, data):\n for (i, data) in enumerate(data):\n self.write(data.text)\n self.clear_sel()\n self.add_sel(self.R(*data.startRegion))\n self.view.run_command('_vi_underscore', {'mode': data.mode, 'count': data.count})\n self.assert_equal_regions(self.R(*data.expectedRegion), self.first_sel(),\n \"Failed on index {} {} : Text:\\\"{}\\\" Region:{}\"\n .format(i, data.msg, data.text, data.startRegion))\n\n def testNormalCases(self):\n self.runTests(NORMAL_CASES)\n\n def testInternalNormalCases(self):\n self.runTests(INTERNAL_NORMAL_CASES)\n\n def 
testVisualMultipleCharacterCases(self):\n self.runTests(VISUAL_MULTI_CHAR_CASES)\n\n def testVisualSingleCharacterCases(self):\n self.runTests(VISUAL_ONE_CHAR_CASES)\n\n def testVisualMultipleLinesCases(self):\n self.runTests(VISUAL_MULTI_LINE_CASES)\n\n def testMultipleCountNormalCases(self):\n self.runTests(MULTI_COUNT_NORMAL_CASES)\n\n def testMultipleCountInternalNormalCases(self):\n self.runTests(MULTI_COUNT_INTERNAL_NORMAL_CASES)\n\n def testMultipleCountVisualCases(self):\n self.runTests(MULTI_COUNT_VISUAL_CASES)\n","repo_name":"guillermooo/Vintageous","sub_path":"tests/commands/test__vi_underscore.py","file_name":"test__vi_underscore.py","file_ext":"py","file_size_in_byte":4193,"program_lang":"python","lang":"en","doc_type":"code","stars":1641,"dataset":"github-code","pt":"72"}
+{"seq_id":"32625849738","text":"from platform import node\n\n\nclass Tree:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n\nroot = Tree(10)\nroot.left = Tree(20)\nroot.right = Tree(30)\nroot.right.left = Tree(31)\nroot.right.right = Tree(32)\nroot.left.left = Tree(40)\nroot.left.right = Tree(50)\nroot.left.left.left = Tree(60)\nroot.left.left.right = Tree(70)\n\ndef deleteTree(node):\n if not node:\n return \n\n else:\n deleteTree(node.left)\n deleteTree(node.right)\n del node\n\ndeleteTree(root)\n\n","repo_name":"medeepeshyadav/Data-Structures-and-Algorithms","sub_path":"Trees/tree_programs/recursive/deleteTree.py","file_name":"deleteTree.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"74204933994","text":"\"\"\"Main script for step: Transform Data\"\"\"\n\nimport argparse\nimport gzip\nimport sys\nimport os\nimport numpy as np\nfrom azureml.core import Run, Workspace, Datastore, Dataset\nfrom azureml.core.authentication import ServicePrincipalAuthentication\nfrom environs import Env\nimport json\n\nprint(\"Transforming data...\")\nsys.path.append(os.getcwd())\nsys.path.append('./src/')\n\nimport forecaster.feat_engineering as feat\nimport forecaster.utilty as utils\n\n\n# --- initialization\nprint(\"Initialization...\")\n# - define and parse script arguments\nparser = argparse.ArgumentParser(allow_abbrev=False)\nparser.add_argument(\"--input-dir\", type=str, required=True, help=\"input directory\")\nparser.add_argument(\"--output-dir\", type=str, required=True, help=\"output directory\")\nargs = parser.parse_args()\ninput_dir = args.input_dir\noutput_dir = args.output_dir\n\n# - get run context\nrun = Run.get_context()\n\n# - ensure that the output directory exists\nprint(\"Ensuring that the output directory exists...\")\nos.makedirs(output_dir, exist_ok=True)\n\n# --- get workspace and datastore\nws = run.experiment.workspace\ndatastore = Datastore.get_default(ws)\n\n# # --- load data\n# def load_gz_data(path):\n# # train labels\n# with gzip.open(os.path.join(path, \"train-labels-idx1-ubyte.gz\"), \"rb\") as label_path:\n# train_labels = np.frombuffer(label_path.read(), dtype=np.uint8, offset=8)\n\n# # train images\n# with gzip.open(os.path.join(path, \"train-images-idx3-ubyte.gz\"), \"rb\") as image_path:\n# train_images = np.frombuffer(image_path.read(), dtype=np.uint8, offset=16).reshape(len(train_labels), 28, 28)\n\n# # test labels\n# with gzip.open(os.path.join(path, \"t10k-labels-idx1-ubyte.gz\"), \"rb\") as label_path:\n# test_labels = np.frombuffer(label_path.read(), dtype=np.uint8, offset=8)\n\n# # test images\n# with gzip.open(os.path.join(path, \"t10k-images-idx3-ubyte.gz\"), \"rb\") as image_path:\n# test_images = np.frombuffer(image_path.read(), dtype=np.uint8, offset=16).reshape(len(test_labels), 28, 28)\n\n# return train_images, train_labels, test_images, test_labels\n\n\n# train_images, train_labels, test_images, test_labels = load_gz_data(input_dir)\n\n\n# # --- convert label files to .npz format (for consistency)\n# print(f\"Converting label files to .npz format for consistency...\")\n# np.savez_compressed(os.path.join(output_dir, \"train-labels-transformed.npz\"), train_labels)\n# np.savez_compressed(os.path.join(output_dir, \"t10k-labels-transformed.npz\"), test_labels)\n\n\n# # --- normalize and reshape the images\n# print(f\"Reshaping and normalizing images...\")\n# train_images = train_images / 255.0\n# np.savez_compressed(os.path.join(output_dir, \"train-images-transformed.npz\"), train_images)\n\n# test_images = test_images / 255.0\n# np.savez_compressed(os.path.join(output_dir, \"t10k-images-transformed.npz\"), test_images)\n\n# --- get full paths\ninput_path = os.path.join(input_dir)\noutput_path = os.path.join(output_dir)\n\n# --- load input\nprint(f\"Load file from last step ...\")\ndf_all_data = utils.load_df_from_file('df_all_data', input_path, 'parquet')\n\n# --- add features\nprint(f\"Add features to survey data ...\")\ndf_with_features = feat.generate_features(df_all_data)\n\n# --- define output parameters\noutput_fname = 'df_with_features'\nmode = 'parquet'\n\n# --- register dataset\ndf_for_register = utils.unset_datecol_as_index_if_needed(df_with_features)\nDataset.Tabular.register_pandas_dataframe(df_for_register, 
(datastore, 'azure-ml-datasets'), 'survey_data_with_all_features')\n\n# --- write output\nprint(\"Writing file \"+ output_fname +\".\"+mode+\" to path \"+output_path+\" ...\")\nutils.write_df_to_file(df_with_features, output_fname, output_path, mode, force_write=True)\n\n\n# --- Done\nprint(\"Done.\")","repo_name":"AndreasDit/Sonntagsfrage","sub_path":"src/Azure_ML/03_pipeline/04_transform_data/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"8403386970","text":"\"\"\"Deformable detection heads.\"\"\"\n\nfrom dataclasses import dataclass, field\n\nfrom omegaconf import DictConfig\nfrom pytorch_lightning.core import LightningModule\nfrom torch import Tensor\nfrom torch.nn import BatchNorm2d, Conv2d, ReLU, Sequential\n\nfrom torchbox3d.nn.blocks.deformable import DeformableBlock\nfrom torchbox3d.nn.heads.conv import ConvHead\nfrom torchbox3d.structures.outputs import TaskOutputs\n\n\n@dataclass(unsafe_hash=True)\nclass DeformableDetectionHead(LightningModule):\n \"\"\"Construct deformable convolution head.\n\n Args:\n num_cls: Number of detection classes.\n heads: Head configuration.\n in_channels: Number of input channels.\n out_channels: Number of output channels.\n final_kernel: Number of channels in the final kernel.\n bn: Flag to enable batch normalization.\n kernel_size: Kernel size.\n groups: Number of groups.\n padding: Padding size.\n stride: Network stride.\n bias: Flag to use bias.\n \"\"\"\n\n num_cls: int\n heads: DictConfig\n in_channels: int\n out_channels: int = 64\n final_kernel: int = 1\n bn: bool = False\n kernel_size: int = 3\n groups: int = 4\n padding: int = 1\n stride: int = 1\n bias: bool = True\n\n classification_head: Sequential = field(init=False)\n regression_head: Sequential = field(init=False)\n\n def __post_init__(self) -> None:\n \"\"\"Initialize network modules.\"\"\"\n super().__init__()\n self.classification_head = Sequential(\n DeformableBlock(\n self.in_channels,\n self.in_channels,\n kernel_size=self.kernel_size,\n groups=self.groups,\n ),\n Conv2d(\n self.in_channels,\n self.out_channels,\n kernel_size=self.kernel_size,\n padding=self.padding,\n bias=self.bias,\n ),\n BatchNorm2d(self.out_channels), # type: ignore\n ReLU(inplace=True),\n Conv2d(\n self.out_channels,\n self.num_cls,\n kernel_size=self.kernel_size,\n stride=self.stride,\n padding=self.padding,\n bias=self.bias,\n ),\n )\n\n self.regression_head = Sequential(\n DeformableBlock(\n self.in_channels,\n self.in_channels,\n kernel_size=self.kernel_size,\n groups=self.groups,\n ),\n ConvHead(\n self.heads,\n self.in_channels,\n out_channels=self.out_channels,\n bn=self.bn,\n final_kernel=self.final_kernel,\n ),\n )\n\n def forward(self, x: Tensor) -> TaskOutputs: # type: ignore[override]\n \"\"\"Network forward pass.\n\n Args:\n x: (B,C,H,W) Tensor of network inputs.\n\n Returns:\n Classification and regression heatmaps.\n \"\"\"\n logits = self.classification_head(x)\n regressands = self.regression_head(x)\n return TaskOutputs(logits=logits, regressands=regressands)\n","repo_name":"benjaminrwilson/torchbox3d","sub_path":"src/torchbox3d/nn/heads/deformable.py","file_name":"deformable.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"72"}
+{"seq_id":"15224188629","text":"import sys, collections\n\n\ninfile = sys.stdin\n\ndef find_square(S):\n qpos = [i for i,ch in enumerate(S) if ch=='?']\n ns = len(S)\n nq = len(qpos)\n baseval = int(S.replace('?','0'), 2)\n # all combinations of 0s and 1s for ? chars\n for i in xrange(1<\", \"\", directions_list))\r\n\t\t\r\n\t\tif directions_list.endswith('Destination will be on the right\\n'):\r\n\t\t\tdirections_list = directions_list.replace('Destination will be on the right\\n', '\\nDestination will be on the right\\n')\r\n\t\telif directions_list.endswith(\"Destination will be on the left\\n\"):\r\n\t\t\tdirections_list = directions_list.replace('Destination will be on the left\\n', '\\nDestination will be on the left\\n')\r\n\t\t\t\t\t\r\n\t#Sends string as a text message.\r\n\tresp.message(directions_list)\t\r\n\treturn str(resp)\r\n\t\r\nif __name__ == \"__main__\":\r\n\tapp.run(debug = True)\r\n","repo_name":"syedrokib/enghack-2017","sub_path":"app/receive.py","file_name":"receive.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"3028646100","text":"from django.shortcuts import render,redirect\r\nfrom django.http import HttpResponse\r\nfrom shopping.models import Product,Cart,Customer\r\nfrom django.contrib.auth.forms import UserCreationForm\r\nfrom django.contrib.auth.forms import AuthenticationForm\r\nfrom shopping.forms import CartForm,CustomerForm\r\nfrom django.contrib.auth.models import User\r\nfrom django.contrib.auth import login as auth_login\r\nfrom django.contrib.auth import logout as auth_logout\r\nfrom django.core.mail import send_mail\r\n\r\n# Create your views here.\r\n\r\ndef index(request): #main page with mail functionality without using celery \r\n mobile=Product.objects.filter(category='M')\r\n tshirt=Product.objects.filter(category='T')\r\n laptop=Product.objects.filter(category='L')\r\n if request.method == 'POST': # i remove my password from settings.py\r\n to = request.POST.get('recipient_email_address')\r\n send_mail('hello this is subject','this is mail body so read it properly','varaiyaniravn@gmail.com',[to,],fail_silently=False)\r\n #return HttpResponse(\"hello
\")\r\n \r\n return render(request,'shop/index.html',{'mobile':mobile,'tshirt':tshirt,'laptop':laptop})\r\n \r\n \r\n\r\ndef detail(request,id):\r\n product=Product.objects.get(pk=id)\r\n if request.method == 'POST':\r\n form = CartForm(request.POST)\r\n if form.is_valid():\r\n user=request.user \r\n quantity=request.POST.get('quantity') \r\n Cart(user=user,product=product,quantity=quantity).save()\r\n #return HttpResponse(\"hello
\")\r\n return redirect('/showcart')\r\n else:\r\n form = CartForm()\r\n return render(request,'shop/detail.html',{'form':form,'product':product})\r\n \r\n \r\n\r\ndef register(request):\r\n if request.method == \"POST\":\r\n form=UserCreationForm(request.POST)\r\n if form.is_valid():\r\n form.save()\r\n #return HttpResponse(\"hii nirav
\")\r\n else:\r\n form=UserCreationForm()\r\n return render(request,'shop/register.html',{'form':form})\r\n\r\ndef login(request): #for user login functionality\r\n if request.method == \"POST\":\r\n form=AuthenticationForm(request,data=request.POST)\r\n if form.is_valid():\r\n user=form.get_user()\r\n auth_login(request,user)\r\n return redirect('/')\r\n else:\r\n form=AuthenticationForm()\r\n return render(request,'shop/login.html',{'form':form})\r\n\r\ndef search(request): #for search bar functionality\r\n if request.method == 'GET':\r\n search=request.GET.get('search')\r\n pr=Product.objects.filter(brand__iexact=search)\r\n #return HttpResponse(\"hii nirav
\")\r\n return render(request,'shop/search.html',{'pr':pr})\r\n\r\ndef showcart(request): # for cart funcctionality\r\n if request.user.is_authenticated:\r\n user=request.user\r\n cart=Cart.objects.filter(user=user)\r\n customer=Customer.objects.filter(user=user)\r\n amount=0.0\r\n totalamount=0.0\r\n cart_product=[p for p in Cart.objects.all() if p.user == user]\r\n print(cart_product)\r\n number_of_item=0\r\n if cart_product:\r\n for p in cart_product:\r\n temp=(p.quantity * p.product.discountprice)\r\n amount += temp\r\n number_of_item=number_of_item+1\r\n totalamount = amount\r\n print(number_of_item)\r\n return render(request,'shop/addtocart.html',\r\n {'cart':cart,'customer':customer,'total':totalamount,'item':number_of_item}) \r\n\r\n\r\ndef remove(request,id):\r\n cart=Cart.objects.get(pk=id)\r\n user=request.user\r\n if user:\r\n cart.delete()\r\n return redirect('/showcart')\r\n\r\ndef logout(request): #for user logout functionality\r\n if request.method == \"POST\":\r\n auth_logout(request)\r\n return redirect('/')\r\n\r\ndef profile(request):\r\n if request.method == \"POST\":\r\n form=CustomerForm(request.POST)\r\n if form.is_valid():\r\n user=request.user\r\n name=request.POST.get('name')\r\n locality=request.POST.get('locality')\r\n city=request.POST.get('city')\r\n zipcode=request.POST.get('zipcode')\r\n phone_number=request.POST.get('phone_number')\r\n Customer(user=user,name=name,locality=locality,city=city,zipcode=zipcode,phone_number=phone_number).save()\r\n return redirect(\"/\")\r\n else:\r\n form=CustomerForm()\r\n return render(request,'shop/profile.html',{'form':form})\r\n\r\n\r\n\r\n","repo_name":"niravvaraiya/webshop","sub_path":"shopping/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"51391579","text":"# -*- coding: utf-8 -*-\n\nimport base64\nimport pytest\nfrom util.tsoy import TSoY\nfrom util.const import HNDL, CTXS, USER_AGENT\nfrom search.begemot.rules.src_setup.web.proto.result_pb2 import TSrcSetupAdjustWebProtoResult\n\n\nclass TestBlogs():\n @pytest.mark.ticket(\"SERP-40011\")\n @pytest.mark.parametrize((\"path\", \"params\"), [\n (HNDL.BLOGS_SEARCH, {\"text\": \"котики facebook\"})\n ])\n @TSoY.yield_test\n def test_blogs_granny(self, query, path, params):\n query.SetUserAgent(USER_AGENT.GRANNY)\n query.SetPath(path)\n query.SetParams(params)\n query.SetDumpFilter(resp=[CTXS.INIT])\n\n resp = yield query\n dc = resp.GetCtxs()[\"device_config\"][0]\n expect = {\n \"device\": \"desktop\",\n \"device_modifier\": \"\",\n \"template_name\": \"web4:desktop\",\n \"template_path\": \"v8:web4:desktop\",\n \"type\": \"device_config\",\n \"v2\": \"v2\"\n }\n assert expect == dc\n\n @pytest.mark.ticket(\"SEARCH-11506\")\n @TSoY.yield_test\n def test_blogs_resp(self, query):\n query.SetPath(HNDL.BLOGS_SEARCH)\n query.SetDumpFilter(resp=[CTXS.WIZARDRY_WEB_SETUP])\n\n resp = yield query\n data = resp.GetCtxs()[\"web_setup\"][0][\"binary\"]\n web_setup = TSrcSetupAdjustWebProtoResult()\n web_setup.ParseFromString(base64.b64decode(data))\n for text in web_setup.Result.SourceRequest.Request:\n got = text\n break\n assert got.find(\" ppbhost:\\\"1\\\"::\") != -1\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"Search engine/test_suits/web/test_blogs.py","file_name":"test_blogs.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"29091738498","text":"import tensorflow as tf\nimport numpy as np\nfrom ..model_loader import DatasetModelLoader\nimport tensorflow_federated as tff\n\n\nclass Cifar100_tff(DatasetModelLoader):\n\n def get_dataset(self, mislabelling_percentage=0): # https://www.tensorflow.org/federated/api_docs/python/tff/simulation/datasets/cifar100\n (cifar100_train), (cifar100_test) = tff.simulation.datasets.cifar100.load_data(cache_dir=\"./datasets\")\n\n sample_clients_train = cifar100_train.client_ids[0:self.num_devices]\n sample_clients_test = cifar100_test.client_ids[0:self.num_devices]\n\n federated_train_data = [(cifar100_train.create_tf_dataset_for_client(x)) for x in sample_clients_train]\n federated_test_data = [(cifar100_test.create_tf_dataset_for_client(x)) for x in sample_clients_test]\n\n print(type(federated_train_data))\n images_train, labels_train = self.get_images_and_labels(federated_train_data)\n images_test, labels_test = self.get_images_and_labels(federated_test_data)\n\n images_train, images_test = images_train / 255.0, images_test / 255.0\n\n return images_train, labels_train, images_test, labels_test\n\n # Image classification task\n def get_compiled_model(self, optimizer: str, metric: str, train_data): # https://www.tensorflow.org/tutorials/images/cnn\n\n tf_model = tf.keras.models.Sequential()\n tf_model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))\n tf_model.add(tf.keras.layers.MaxPooling2D((2, 2)))\n tf_model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))\n tf_model.add(tf.keras.layers.MaxPooling2D((2, 2)))\n tf_model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))\n tf_model.add(tf.keras.layers.Flatten())\n tf_model.add(tf.keras.layers.Dense(64, activation='relu'))\n tf_model.add(tf.keras.layers.Dense(100))\n\n tf_model.compile(optimizer=optimizer,\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[metric])\n\n return tf_model\n\n def get_loss_function(self):\n return tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n\n @staticmethod\n def get_images_and_labels(federated_data):\n images_numpy = []\n labels_numpy = []\n\n for client_data in federated_data:\n for client_image in client_data:\n images_numpy.append(np.float64(client_image['image'].numpy()))\n labels_numpy.append(client_image['label'].numpy())\n\n return np.array(images_numpy), np.array(labels_numpy)\n\n @staticmethod\n def get_images_and_labels_by_client(federated_data):\n images_numpy = []\n labels_numpy = []\n\n for client_data in federated_data:\n images_of_client = []\n labels_of_client = []\n for client_image in client_data:\n images_of_client.append(np.float64(client_image['image'].numpy()))\n labels_of_client.append(client_image['label'].numpy())\n images_numpy.append(images_of_client.numpy())\n labels_numpy.append(labels_of_client.numpy())\n\n return np.array(images_numpy), np.array(labels_numpy)\n\n def select_non_iid_samples(self, y, num_clients, nk, alpha):\n\n (cifar100_train), (cifar100_test) = tff.simulation.datasets.emnist.load_data()\n\n sample_clients_train = cifar100_train.client_ids[0:num_clients]\n\n federated_train_data = [cifar100_train.create_tf_dataset_for_client(x) for x in sample_clients_train]\n\n images_train, labels_train = self.get_images_and_labels_by_client(federated_train_data)\n\n clients_data_indexes = []\n i = 0\n\n for client_images in images_train:\n client_indexes = []\n for image in client_images:\n client_indexes.append(i)\n i = i + 1\n 
clients_data_indexes.append(client_indexes)\n\n return clients_data_indexes\n","repo_name":"NicholasRasi/FL-Simulator","sub_path":"fl_sim/dataset/datasets/tff_cifar100.py","file_name":"tff_cifar100.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"19889369727","text":"import matplotlib.pyplot as plt\nimport numpy as np\nplt.style.use(\"classic\")\n\ndef f(t):\n return np.exp(-(t)**2/2)/(np.sqrt(2*np.pi))\n\nxv = np.linspace(-7,7,1000)\nyv = f(xv)\n\nplt.plot(xv, yv, lw=1)\nplt.xlabel(\"$t$\")\nplt.ylabel(\"$f(t)$\")\nplt.grid(True)\n\nimport tikzplotlib\ntikzplotlib.clean_figure()\ntikzplotlib.save(\"gaussian.tex\")\n","repo_name":"pritishkarmakar17/Summer23","sub_path":"gaussian.py","file_name":"gaussian.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"4497084637","text":"from math import factorial\nfrom collections import Counter\n\ndef gcd(a,b):\n while b:\n a, b = b, a % b\n return a\n\ndef num_cycle(c, n):\n nc = factorial(n)\n for i, j in Counter(c).items():\n nc //= (i ** j) * factorial(j)\n return nc \n\ndef part_cycle(n, i=1):\n yield [n]\n for i in range(i, n//2+1):\n for p in part_cycle(n-i, i):\n yield [i]+p\n\ndef solution(w, h, s):\n # Your code here\n mat = 0\n for cycle_w in part_cycle(w):\n for cycle_h in part_cycle(h): \n n = num_cycle(cycle_w, w) * num_cycle(cycle_h, h)\n mat += n * (s ** sum([sum([gcd(i, j) for i in cycle_w]) for j in cycle_h]))\n return str(mat // (factorial(w) * factorial(h)))","repo_name":"sdnitrogen/google-foobar","sub_path":"level5/disorderly_escape.py","file_name":"disorderly_escape.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"7668409209","text":"def bubble_sort(seq, asc=True): \n for outer in range(len(seq)):\n for inner in range(outer+1, len(seq)):\n if asc and seq[outer] > seq[inner]:\n swap(seq, outer, inner)\n elif not asc and seq[outer] < seq[inner]:\n swap(seq, outer, inner)\n\n return seq\n\ndef swap(seq, one, two):\n seq[one] = seq[one] + seq[two]\n seq[two] = seq[one] - seq[two]\n seq[one] = seq[one] - seq[two]\n return seq\n\nprint(bubble_sort([2,1,5,6,3,8,6,4,7,88,4,5,67,34,32,56,78,79,234,90,77,68,85]))\nprint(bubble_sort([2,1,5,6,3,8,6,4,7,88,4,5,67,34,32,56,78,79,234,90,77,68,85], False))","repo_name":"varanasikalyan/ds","sub_path":"sort/bubble.py","file_name":"bubble.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"21168459349","text":"import multiprocessing,time\n\n\ndef consumer(input_q):\n print('Into consumer:',time.ctime())\n while True:\n # 处理项\n item = input_q.get()\n if item is None:\n break\n print('pull', item, 'out of q') #此处替换为有用的工作\n input_q.task_done() #发出信号通知任务完成\n print('Out of consumer:',time.ctime())\n\ndef producer(sequence, output_q):\n print('Into producer:',time.ctime())\n for item in sequence:\n output_q.put(item)\n print('put',item,'into q')\n print('Out of producer:',time.ctime())\n\nif __name__ == '__main__':\n q = multiprocessing.JoinableQueue()\n p1 = multiprocessing.Process(target=consumer,args=(q,))\n p1.start()\n\n p2 = multiprocessing.Process(target=consumer, args=(q,))\n p2.start()\n producer(range(100),q)\n # 如果是多个进程的情况下,可以设置多个哨兵,多个哨兵并不互相影响\n q.put(None)\n q.put(None)\n p1.join()\n p2.join()","repo_name":"yz5201214/python_study","sub_path":"chapter4/chapter4-thread1/case15.py","file_name":"case15.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"11862248146","text":"from openalea.container import Topomesh\r\nimport raw_read\r\n\r\ndef read (filename) :\r\n\tfile_descr=raw_read.read(filename)\r\n\tdimX,dimY,dimZ,vX,vY,vZ=file_descr[-1]\r\n\tscale=(vX,vY,vZ)\r\n\tm=Topomesh(3)\r\n\tmesh_prop=[]\r\n\tprop={}\r\n\tfor pid,(dum,pt_prop) in file_descr[0].iteritems() :\r\n\t\tm.add_wisp(0,pid)\r\n\t\tprop[pid]=tuple(scale[i]*pt_prop[i] for i in xrange(3))\r\n\tmesh_prop.append(prop)\r\n\tfor i in xrange(1,4) :\r\n\t\tprop={}\r\n\t\tfor wid,(border_list,wisp_prop) in file_descr[i].iteritems() :\r\n\t\t\tm.add_wisp(i,wid)\r\n\t\t\tprop[wid]=wisp_prop\r\n\t\t\tfor bid in border_list :\r\n\t\t\t\tm.link(i,wid,bid)\r\n\t\tmesh_prop.append(prop)\r\n\treturn m,mesh_prop\r\n","repo_name":"jldinh/vplants","sub_path":"tissue/vmanalysis/src/vmanalysis/serial/mesh_read.py","file_name":"mesh_read.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"72075229672","text":"#input_path = 'input/a_example.txt'\ninput_path = 'input/b_lovely_landscapes.txt'\n\nfrom hash_io import *\n\nimages = Input_parser(input_path).images\nprint(input_path)\nprint(len(images), 'images')\n\nfrom simple_solutions import *\nslides = dumb_solution(images)\nprint(len(slides), 'slides')\n\nfrom scoring import *\nprint('dumb score=', calc_score(slides))\n\ndef find_best_pair(slide_chains):\n \"\"\"slide_chains: list of lists of chains\"\"\"\n n_chains = len(slide_chains)\n n_chains = 5000\n print('calculating {} scores'.format(n_chains*n_chains))\n best_pair = None\n best_score = 0\n for ind_0 in range(n_chains):\n for ind_1 in range(n_chains):\n score = calc_pair_score(slide_chains[ind_0][-1], slide_chains[ind_1][0])\n if score > best_score:\n best_score = score\n best_pair = (ind_0, ind_1)\n return best_pair\n\ndef init_slide_chains(slides):\n \"\"\"Return trivial slide chains\"\"\"\n return [(s,) for s in slides]\n\nslide_chains = init_slide_chains(slides)\nprint(slide_chains)\n\nbest_pair = find_best_pair(slide_chains)\nprint(best_pair)\n\n#writer = Output_writer(slides)\n#writer.write_result_file('a_dumb.txt')\n\n\n","repo_name":"denissimakov/tbhash2019","sub_path":"denis_cnf.py","file_name":"denis_cnf.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"22787946857","text":"# Definition for a binary tree node.\r\n# class TreeNode:\r\n# def __init__(self, val=0, left=None, right=None):\r\n# self.val = val\r\n# self.left = left\r\n# self.right = right\r\nclass Solution:\r\n def goodNodes(self, root: TreeNode) -> int:\r\n # Time: O(n)\r\n # Space: O(d)\r\n \r\n def dfs(node, maxVal):\r\n if not node:\r\n return\r\n \r\n if node.val >= maxVal:\r\n count[0] += 1\r\n maxVal = max(maxVal, node.val)\r\n \r\n dfs(node.left, maxVal)\r\n dfs(node.right, maxVal)\r\n \r\n \r\n count = [0] # The root is always good\r\n dfs(root, root.val)\r\n return count[0]","repo_name":"NaralC/Algorithms-Interview-Questions","sub_path":"Leetcode/Medium/1448-Count-Good-Nodes-in-Binary-Tree.py","file_name":"1448-Count-Good-Nodes-in-Binary-Tree.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"14833942439","text":"\nimport numpy as np\nimport torch\nfrom torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedSparseTensor\n\n\n_MIN_DIM_SIZE = 16\n_MAX_DIM_SIZE = 16 * 1024 ** 2\n_POW_TWO_SIZES = tuple(2 ** i for i in range(\n int(np.log2(_MIN_DIM_SIZE)),\n int(np.log2(_MAX_DIM_SIZE)) + 1,\n))\n\nclass UnaryOpSparseFuzzer(Fuzzer):\n def __init__(self, seed, dtype=torch.float32, cuda=False):\n super().__init__(\n parameters=[\n # Sparse dim parameter of x. (e.g. 1D, 2D, or 3D.)\n FuzzedParameter(\"dim_parameter\", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True),\n FuzzedParameter(\n name=\"sparse_dim\",\n distribution={1: 0.4, 2: 0.4, 3: 0.2},\n strict=True\n ),\n # Shapes for `x`.\n # It is important to test all shapes, however\n # powers of two are especially important and therefore\n # warrant special attention. This is done by generating\n # both a value drawn from all integers between the min and\n # max allowed values, and another from only the powers of two\n # (both distributions are loguniform) and then randomly\n # selecting between the two.\n [\n FuzzedParameter(\n name=f\"k_any_{i}\",\n minval=_MIN_DIM_SIZE,\n maxval=_MAX_DIM_SIZE,\n distribution=\"loguniform\",\n ) for i in range(3)\n ],\n [\n FuzzedParameter(\n name=f\"k_pow2_{i}\",\n distribution={size: 1. / len(_POW_TWO_SIZES) for size in _POW_TWO_SIZES}\n ) for i in range(3)\n ],\n [\n FuzzedParameter(\n name=f\"k{i}\",\n distribution={\n ParameterAlias(f\"k_any_{i}\"): 0.8,\n ParameterAlias(f\"k_pow2_{i}\"): 0.2,\n },\n strict=True,\n ) for i in range(3)\n ],\n FuzzedParameter(\n name=\"density\",\n distribution={0.1: 0.4, 0.05: 0.3, 0.01: 0.3},\n ),\n FuzzedParameter(\n name=\"coalesced\",\n distribution={True: 0.5, False: 0.5},\n ),\n FuzzedParameter(name=\"random_value\", minval=0, maxval=2 ** 32 - 1, distribution=\"uniform\"),\n ],\n tensors=[\n FuzzedSparseTensor(\n name=\"x\",\n size=(\"k0\", \"k1\", \"k2\"),\n dim_parameter=\"dim_parameter\",\n sparse_dim=\"sparse_dim\",\n min_elements=4 * 1024,\n max_elements=32 * 1024 ** 2,\n density=\"density\",\n coalesced=\"coalesced\",\n dtype=dtype,\n cuda=cuda,\n ),\n ],\n seed=seed,\n )\n","repo_name":"pytorch/pytorch","sub_path":"torch/utils/benchmark/op_fuzzers/sparse_unary.py","file_name":"sparse_unary.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"}
+{"seq_id":"4948964291","text":"#!/usr/bin/env python3\n\nwith open('input.txt', 'r') as f:\n data = [line.strip().split(' | ') for line in f.readlines()]\n\nprint(sum([\n len([output for output in outputs.split() if len(output) in [2, 3, 4, 7]])\n for _, outputs in data\n]))\n\ndef create_num_map(digits):\n nums = [None] * 10\n nums[1], nums[7], nums[4], *rest, nums[8] = sorted(digits, key=len)\n\n for digit in rest[3:]: # 0, 6, 9\n if digit.issuperset(nums[4]): # 9\n nums[9] = digit\n elif digit.issuperset(nums[1]): # 0\n nums[0] = digit\n else: # 6\n nums[6] = digit\n\n for digit in rest[:3]: # 2, 3, 5\n if digit.issubset(nums[6]): # 5\n nums[5] = digit\n elif digit.issubset(nums[9]): # 3\n nums[3] = digit\n else: # 2\n nums[2] = digit\n\n return {num: str(i) for i, num in enumerate(nums)}\n\nacc = 0\nfor digits, outputs in data:\n num_map = create_num_map(map(frozenset, digits.split()))\n acc += int(''.join([num_map[frozenset(output)] for output in outputs.split()]))\n\nprint(acc)\n","repo_name":"Rattko/Advent-of-Code","sub_path":"2021/08/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"38258096597","text":"#!/usr/local/bin/python3\n\nimport json\nimport sys\nfrom subprocess import *\nfrom math import *\n\nfilename = 'input/hdr.json'\n\nwith open(filename) as f:\n data = json.load(f)\n\ndata[\"scene\"][\"outputFilePath\"] = \"output/offset/\"\n\nfor i in range(360):\n data[\"scene\"][\"count\"] = i\n data[\"scene\"][\"ambientColor\"][\"offset\"] = i\n\n proc = Popen('./bin/c-ray ', stdin=PIPE, shell=True)\n proc.stdin.write(json.dumps(data).encode())\n proc.communicate()\n","repo_name":"vkoskiv/c-ray","sub_path":"scripts/animations/offset.py","file_name":"offset.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":719,"dataset":"github-code","pt":"72"}
+{"seq_id":"74577881511","text":"\nimport click\nimport numpy as np\nimport pandas as pd\nimport cupy as cp\nimport re\n\nimport yaml\nfrom typing import Sequence, Tuple, Optional, Dict\nfrom pathlib import Path\nfrom tifffile import imread, imwrite\nfrom cucim.skimage.transform import downscale_local_mean\nfrom cucim.skimage.filters import gaussian\nfrom skimage.measure import regionprops\nfrom skimage.measure._regionprops import RegionProperties\nfrom tqdm import tqdm\nimport napari\n\nfrom dexp.processing.morphology import area_white_top_hat\nfrom segmentation import segment_with_WS\n\n\nDISPLAY = False\nSAVE = True\nCELL_CHANNEL = 0\nCHANNELS = ['DAPI', 'SOX2', 'TBXT', 'OCT']\nZ_SCALE = 2\nAREA_OPENING_THOLD = 1e4\nSUMMARY_FUN = np.sum\n\n\ndef find_image_paths(images_dir: Path) -> Sequence[Path]:\n errors = 'ERRORS\\n'\n paths = []\n\n for im_path in tqdm(images_dir.glob('**/*.tif'), 'Checking images'):\n if any(suffix in str(im_path) for suffix in ('label', 'nobkg', 'measure')):\n continue\n \n image = imread(str(im_path))\n\n if image.ndim != 4:\n errors = errors + f\" - Could not load {im_path}, expected 4 dimensions and found array of shape {image.shape}\\n\"\n\n paths.append(im_path)\n \n if errors != 'ERRORS\\n':\n print(errors)\n \n stage_count = {}\n for im_path in paths:\n stage = get_stage(im_path)\n if stage not in stage_count:\n stage_count[stage] = 1\n else:\n stage_count[stage] += 1\n\n print(f'{len(paths)} images found')\n print('-------------------------')\n for stage, count in stage_count.items():\n print(f'{stage.ljust(5)}: {count} images')\n\n return paths\n\n\ndef get_stage(path: Path) -> str:\n return re.findall(r'(?<=\\/)(bud|[0-9]+s)(?=\\/)', str(path))[0]\n\n\ndef write_label(im_path: Path, label: np.ndarray) -> None:\n lb_path = str(im_path.with_suffix('')) + '_label.tif'\n imwrite(lb_path, label)\n\n\ndef correct_intensities(image: np.ndarray, metadata: Dict) -> np.ndarray:\n \"\"\"Corrects image intensities according to exposure and laser power,\n it assumes axes are ordered according to wave-length.\n \"\"\"\n assert CELL_CHANNEL == 0\n\n corrected = image.copy()\n\n corrected[1, ...] = corrected[1, ...] *\\\n (metadata[\"LASERPOWER_405\"] / metadata[\"LASERPOWER_488\"]) *\\\n (metadata[\"EXPOSURE_405\"] / metadata[\"EXPOSURE_488\"])\n\n corrected[2, ...] = corrected[2, ...] 
*\\\n (metadata[\"LASERPOWER_405\"] / metadata[\"LASERPOWER_561\"]) *\\\n (metadata[\"EXPOSURE_405\"] / metadata[\"EXPOSURE_561\"])\n\n # OCT-4 is not corrected\n return corrected\n\n\ndef process(image: np.ndarray, metadata: Dict, display: bool = False, im_path: Optional[Path] = None) -> Tuple[pd.DataFrame, np.ndarray]:\n image = downscale_local_mean(cp.asarray(image), (1, Z_SCALE, 1, 1)).get()\n # image = correct_intensities(image, metadata) # NOTE: not used, corrected on the tabular data.\n\n # removing background \n no_bkg = np.stack([\n area_white_top_hat(image[i], 1e4, sampling=1, axis=0)\n for i in range(len(image))\n ])\n\n dapi = gaussian(cp.asarray(image[CELL_CHANNEL]), sigma=1).get()\n dapi_z_intensity = np.quantile(dapi, q=0.999, axis=(1, 2))\n\n # normalizing per z slice\n normalized = no_bkg / dapi_z_intensity[None, :, None, None]\n\n if display:\n import napari\n viewer = napari.Viewer()\n viewer.add_image(image, name='original', channel_axis=0)\n viewer.add_image(no_bkg, name='processed', channel_axis=0)\n viewer.add_image(normalized, name='normalized', channel_axis=0)\n napari.run()\n \n labels = segment_with_WS(\n image[CELL_CHANNEL],\n display=display\n )\n props: Sequence[RegionProperties] = regionprops(labels, normalized.transpose((1, 2, 3, 0)))\n\n df = []\n\n for p in props:\n prop_feats = p.intensity_image[p.image]\n expressions = SUMMARY_FUN(prop_feats, axis=0)\n row = [\n p.label, *p.centroid, p.image.sum(), *expressions,\n ]\n df.append(row)\n\n df = pd.DataFrame(\n df,\n columns=['label', 'z', 'y', 'x', 'area'] + CHANNELS[:len(no_bkg)],\n )\n\n # saving data\n if im_path is not None:\n measurements = np.zeros((*no_bkg.shape[1:], no_bkg.shape[0]) , dtype=np.float32)\n for p in props:\n prop_feats = SUMMARY_FUN(p.intensity_image[p.image], axis=0)\n measurements[p.slice][p.image] = prop_feats # / (prop_feats[CELL_CHANNEL] + 1e-8)\n\n measurements = measurements.transpose((3, 0, 1, 2))\n imwrite(str(im_path.with_suffix('')) + '_nobkg.tif', no_bkg)\n imwrite(str(im_path.with_suffix('')) + '_measure.tif', measurements)\n\n return df, labels\n\n\n@click.command('process')\n@click.option('--images-dir', '-i', type=click.Path(exists=True, path_type=Path), help='cropped image directory', required=True)\n@click.option('--out-path', '-o', type=click.Path(path_type=Path), help='.csv output path', required=True)\ndef process_cli(images_dir: Path, out_path: Path) -> None:\n\n im_paths = find_image_paths(images_dir)\n dfs = []\n\n with tqdm(im_paths, desc='Processing') as pbar:\n for im_path in pbar:\n pbar.set_description(desc=f'{im_path.name}')\n image = imread(str(im_path))\n\n with open(im_path.parent / 'metadata.yml') as f:\n metadata = yaml.safe_load(f)\n\n df, label = process(image, metadata=metadata, display=DISPLAY, im_path=im_path if SAVE else None)\n df['file'] = im_path.name.split('.', 1)[0]\n df['stage'] = get_stage(im_path)\n for k, v in metadata.items():\n if 'EXPOSURE' in k or 'LASERPOWER' in k:\n df[k] = v\n\n write_label(im_path, label)\n\n dfs.append(df)\n\n # updating at every iteration so I don't have to wait to it to finish\n df = pd.concat(dfs)\n df.to_csv(out_path, index=False)\n\n\n@click.command('figure')\n@click.option('--image-dir', '-i', required=True, type=click.Path(exists=True, path_type=Path), help='input images (*_nobkg.etc, *_labels.tiff, etc.) 
path')\n@click.option('--out-dir', '-o', required=True, type=click.Path(path_type=Path), help='output directory')\ndef figure_cli(image_dir: Path, out_dir: Path) -> None:\n out_dir.mkdir(exist_ok=True)\n\n image_path = str(next(image_dir.glob('*denoised.tif')))\n labels_path = image_path.replace('.tif', '_label.tif')\n nobkg_path = image_path.replace('.tif', '_nobkg.tif')\n measure_path = image_path.replace('.tif', '_measure.tif')\n\n with open(out_dir / 'LOG.txt', mode='w') as f:\n f.write(f'input directory: {image_dir}')\n\n v = napari.Viewer()\n v.window.resize(1080, 720)\n v.dims.ndisplay = 3\n\n def set_camera():\n v.camera.center = (25, 265, 235)\n v.camera.zoom = 1\n v.camera.angles = (-45, 55, 135)\n\n def screenshot(name):\n for i in range(3):\n v.layers[i].visible = True\n v.screenshot(out_dir / f'{name}_ch{i}.png')\n v.layers[i].visible = False\n \n v.add_image(imread(image_path), channel_axis=0, scale=(.5, 1, 1), visible=False)\n set_camera()\n screenshot('image')\n v.layers.clear()\n\n v.add_image(imread(nobkg_path), channel_axis=0, visible=False)\n set_camera()\n screenshot('nobkg')\n v.layers.clear()\n\n v.add_image(imread(measure_path), channel_axis=0, visible=False)\n set_camera()\n screenshot('measure')\n v.layers.clear()\n\n v.add_labels(imread(labels_path))\n set_camera()\n v.screenshot(out_dir / 'labels.png')\n\n\n@click.group()\ndef main():\n pass\n\n\nmain.add_command(process_cli)\nmain.add_command(figure_cli)\n\nif __name__ == '__main__':\n main()\n","repo_name":"royerlab/zebrahub-paper-hcr-analysis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"15227634519","text":"#!/usr/bin/env python\ntest = 'C-large'\n\ninput = open('%s.in'%test)\noutput = open('%s.out'%test, 'w')\n\n# import sys\n# output = sys.stdout\n\ndef solve(B, T, boxes, toys):\n cache = {}\n def head(N, skip, what):\n s = 0\n i = 0\n while s + what[i][0] <= skip:\n s += what[i][0]\n i += 1\n if i >= N:\n return (-1, 0)\n\n return (what[i][1], what[i][0] - (skip - s))\n\n def match(skip_b, skip_t):\n if cache.get((skip_b, skip_t)) is not None:\n return cache[(skip_b, skip_t)]\n box, box_count = head(B, skip_b, boxes)\n toy, toy_count = head(T, skip_t, toys)\n res = None\n if box == -1 or toy == -1:\n res = 0\n else:\n if box == toy:\n m = min(box_count, toy_count)\n res = match(skip_b+m, skip_t+m) + m\n else:\n res = max(match(skip_b, skip_t+toy_count), match(skip_b+box_count, skip_t))\n cache[(skip_b, skip_t)] = res\n return res\n res = match(0, 0)\n return res\n\n\nT = int(input.readline())\nfor t in xrange(T):\n B, T = map(int, input.readline().split())\n _boxes = map(int, input.readline().split())\n _toys = map(int, input.readline().split())\n boxes = []\n for i in xrange(B):\n boxes.append((_boxes[2*i], _boxes[2*i+1]))\n toys = []\n for i in xrange(T):\n toys.append((_toys[2*i], _toys[2*i+1]))\n print >>output, \"Case #%s:\"%(t+1), solve(B, T, boxes, toys)\n","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/CodeJamData/12/33/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"}
+{"seq_id":"7990929354","text":"from fastapi import FastAPI, HTTPException, status\nfrom typing import Union\nimport os\nfrom dotenv import dotenv_values\nfrom ai import gen_text, gen_image, compress_image, text_to_html, domain_to_name, gen_html\n\napp = FastAPI()\n\nconfig = {**dotenv_values(\".env\"), **os.environ}\nopenai_api_key = config['OPENAI_API_KEY']\ntinify_api_key = config['TINIFY_API_KEY']\nhtml_folder = config['HTML_FOLDER']\n\n@app.get(\"/\")\ndef read_root(type: Union[str, None] = None, domain: Union[str, None] = None):\n if type == None or domain == None:\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=\"Not enough parameters\")\n return {\"Dwarf API\"}\n\n@app.get(\"/api\")\ndef read_api(type: Union[str, None] = None, domain: Union[str, None] = None):\n if type == None or domain == None:\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=\"Not enough parameters\")\n if type == \"dwarf\":\n name = domain_to_name(domain)\n text_promt = f\"Short story abou dwarf with surname {name}. Make him some name. Story up to 300 words.\"\n image_promt = f\"Dwarf named {name} pencil drawing\"\n template = \"templates/dwarf.jinja2\"\n if type == \"star\":\n name = domain_to_name(domain)\n text_promt = f\"Tell me what do you know about star {name}. Add details who and when discovered it. Story up to 200 words.\"\n image_promt = f\"Star {name} in deep space\"\n template = \"templates/star.jinja2\"\n\n print(f\"Generating {type} {name} text\")\n raw_text = gen_text(text_promt, openai_api_key)\n text_html = text_to_html(raw_text)\n print(f\"Generating {type} {name} image\")\n image_url = gen_image(image_promt, openai_api_key)\n print(f\"Compressing {type} {name} image\")\n compress_image(image_url, tinify_api_key, name, html_folder)\n print(f\"Generating {type} {name} html\")\n gen_html(template, name, text_html, domain, html_folder)\n\n return {\"raw_text\": f\"{raw_text}\",\n \"html_text\": f\"{text_html}\",\n \"image_url\": f\"{image_url}\",\n }","repo_name":"kitich/dwarfs","sub_path":"app/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"16673937228","text":"\"\"\"\npython_charts\n\"\"\"\n\nfrom pyecharts.charts import Line\nfrom pyecharts.options import *\n\nline = Line()\nline.add_xaxis([\"周一\", \"周二\", \"周三\", \"周四\", \"周五\", \"周六\", \"周日\"])\nline.add_yaxis(\"商家A\", [120, 132, 101, 134, 90, 230, 210])\n\n# 全局对象\nline.set_global_opts(\n title_opts=TitleOpts(title=\"test\", pos_left=\"center\", pos_bottom=\"1%\"),\n legend_opts=LegendOpts(is_show=True),\n tooltip_opts=TooltipOpts(is_show=True),\n visualmap_opts=VisualMapOpts(is_show=True)\n)\n\nline.render()\n","repo_name":"BigRootMasters/python_learn","sub_path":"learn/learn_day/day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"42269984115","text":"from asyncio import sleep\nfrom time import sleep as s\nimport sys\nfrom discord import ExtensionError, Intents\nfrom discord.ext import commands\ntry:\n from lib import time_now, config, check_JSON_files, check_version, setup_change\nexcept ImportError:\n from datetime import datetime\n print(f\"{datetime.now().strftime('[%H:%M:%S]')} Lib is missing, consider using 'sdtb-setup.exe' to download the required dependencies\")\n s(5)\n sys.exit()\n\n\ncheck_version()\nsetup_change()\n\n\nif not check_JSON_files():\n print(f\"{time_now()} One or multiple JSON files are missing, check the wiki for more info\")\n s(5)\n sys.exit()\n\nelif len(config.token) < 30:\n print(f\"{time_now()} Your bot token is invalid, check the wiki for more info\")\n s(5)\n sys.exit()\n\nelif len(config.prefix.replace(\" \", \"\")) == 0:\n print(f\"{time_now()} Your bot prefix is invalid, check the wiki for more info\")\n s(5)\n sys.exit()\n\n\nintents = Intents.default()\nintents.members = True # pylint: disable=E0237\nbot = commands.Bot(\n command_prefix = config.prefix,\n case_insensitive = True,\n intents = intents\n)\n\n\n@bot.event\nasync def on_ready():\n print(f\"{time_now()} {bot.user.name} loading...\")\n\n try:\n bot.load_extension(\"tickets.tickets\")\n print(f\"{time_now()} Loaded 'tickets'\")\n except ExtensionError as exc:\n print(exc)\n await sleep(5)\n sys.exit()\n\n print(f\"{time_now()} {bot.user.name} online\")\n\n\n@bot.event\nasync def on_command_error(ctx, error):\n if isinstance(error, commands.CommandNotFound):\n print(f\"{time_now()} {ctx.message.author} tried using '{ctx.message.content}' but the command didn't exist. Maybe add it as an alias in config.json?\")\n\n if isinstance(error, commands.MissingPermissions):\n print(f\"{time_now()} {ctx.message.author} tried using '{ctx.message.content}' but they were lacking permissions.\")\n\n if isinstance(error, commands.BotMissingPermissions):\n print(f\"{time_now()} I don't have sufficient permissions. Make sure I have administrator!\")\n\n\nprint(f\"{time_now()} Consider changing to simple discord bot (https://github.com/Voided-Git/simple-discord-bot) from Voided, it is being actively maintained unlike this project.\")\n\nbot.run(config.token)\n","repo_name":"Voided-Git/simple-discord-ticket-bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"}
+{"seq_id":"18460893471","text":"#import\nimport pandas as pd\nimport numpy as np\n# read dataset\noriginal_dms_data = pd.read_csv('/Users/liza/Documents/Bioinfo Project/DMS_data/AAAA_GFP_dms_data_original_komplett.csv')\n# split first column of df into multiple columns\noriginal_dms_data_col = original_dms_data\nonly_mutants = original_dms_data[\"mutant\"].to_frame()\noriginal_dms_data_col[['m1', 'm2', 'm3', 'm4', 'm5', 'm6', 'm7', 'm8', 'm9', 'm10', 'm11', 'm12', 'm13', 'm14', 'm15']] = original_dms_data_col['mutant'].str.split(':', 15, expand=True)\n\n# count how many mutations each sequence has\nlist_mut_count_in_progress = []\nfor i in range(len(original_dms_data['mutant'])):\n list_mut_count_in_progress.append(original_dms_data['mutant'].iloc[i].count(':'))\nlist_mut_count_prae = np.array(list_mut_count_in_progress)\nlist_mut_count = (list_mut_count_prae + 1)\ndf_mutation_counts = pd.DataFrame(list_mut_count)\n\n#generate more convenient dataframe\nworking_dataframe_prae = pd.concat([original_dms_data_col, df_mutation_counts], axis=\"columns\")\n#drop columns we don´t need at the moment -> working_dataframe\nworking_dataframe = working_dataframe_prae.drop(['mutant', 'mutated_sequence', 'DMS_score_bin'], axis=1)\nworking_dataframe.rename(columns={working_dataframe.columns[16]: 'mut_count'}, inplace=True)\n#another dataframe for easy access -> nur_fscore_mut_count\nnur_fscore_mut_count = working_dataframe.loc[:, [\"DMS_score\", \"mut_count\"]]\n\n#goal: one list with all existing mutations in the dataset\n#-> all_possible_mutations\nworking_dataframe_only_ms = working_dataframe.loc[:, [\"m1\", \"m2\", \"m3\", 'm4', 'm5', 'm6', 'm7', 'm8', 'm9', 'm10', 'm11', 'm12', 'm13', 'm14', 'm15']]\nall_possible_mutations = working_dataframe_only_ms.values.flatten().tolist()\nall_possible_mutations = list(set(all_possible_mutations))\n#automatically: one \"none\" value: drop\nwhile None in all_possible_mutations:\n all_possible_mutations.remove(None)\nonly_mutants_list = only_mutants['mutant']\n\n#checkpoint 1\nprint('all_possible_mutations finished (1)')\n\n#again: generate more convenient dataframe, add mutation_count to original one\nworking_dataframe_prae = pd.concat([original_dms_data_col, df_mutation_counts], axis=\"columns\")\n#drop other columns\nworking_dataframe = working_dataframe_prae.drop(['mutant', 'mutated_sequence', 'DMS_score_bin'], axis=1)\nworking_dataframe.rename(columns={working_dataframe.columns[16]: 'mut_count'}, inplace=True)\n\n#goal: dataframe that contains boolians if the mutations from all_possible_mutations exist in the mutants (Kreuztabelle)\nlist_of_dfs = []\n\nfor i in all_possible_mutations:\n new_column_name = f'{i}'\n new_column_values = [only_mutants_list.str.contains(i, regex= False)]\n new_df = pd.DataFrame({new_column_name: new_column_values})\n new_df_exploded = new_df.explode(new_column_name)\n list_of_dfs.append(new_df_exploded)\nresult_how_often = pd.concat(list_of_dfs, axis=1)\nresult_how_often = result_how_often.reset_index(drop=True)\n\n## --> result_how_often (all_possible_mutations (columns), mutants (rows))\n\n# dataframe generated from original df: only fscore (= DMS_score) and mut_count\ncount_fscore_frame = working_dataframe[['DMS_score', 'mut_count']]\n\n#goal: calculate variance and the values used for variance calculation for each mutation and each mutocunt\n#normally: present it in graph (#) -> stored\nimport matplotlib.pyplot as plt\n\nfig, axes = plt.subplots(nrows=5, ncols=3, figsize=(19, 12)) # Abbildung und Achsenobjekte 
erstellen\nplt.subplots_adjust(wspace=0.4, hspace=0.6)\n\nscatter_plots = []\nfor j, ax in zip(range(2, 16), axes.flatten()):\n variance_per_mutant_list = []\n\n for i in all_possible_mutations:\n mut_count_fscore = count_fscore_frame.loc[result_how_often[i] == True]\n fscore_mut = mut_count_fscore['DMS_score'].loc[mut_count_fscore['mut_count'] == j]\n varianz_mut = fscore_mut.var()\n variance_per_mutant_list.append(varianz_mut)\n\n variance_per_mutant_series = pd.Series(variance_per_mutant_list, index=all_possible_mutations)\n variance_per_mutant_df = variance_per_mutant_series.to_frame()\n\n#-> variance_per_mutant\n\n how_many_for_variance = []\n\n for i in all_possible_mutations:\n mut_count_fscore = count_fscore_frame.loc[result_how_often[i] == True]\n fscore_mut = mut_count_fscore['DMS_score'].loc[mut_count_fscore['mut_count'] == j]\n wie_viel_jeweils = len(fscore_mut)\n how_many_for_variance.append(wie_viel_jeweils)\n\n how_many_for_variance = pd.Series(how_many_for_variance, index=all_possible_mutations)\n how_many_for_variance_df = how_many_for_variance.to_frame()\n# -> how_many_for_variance\n how_many_AND_variance_df = pd.concat([how_many_for_variance_df, variance_per_mutant_df], axis = 1)\n how_many_AND_variance_df.columns = ['Anzahl benutzter Werte', 'Varianz']\n how_many_AND_variance_df = how_many_AND_variance_df.dropna()\n\n#scatter plot\n ax.scatter(how_many_AND_variance_df['Anzahl benutzter Werte'],how_many_AND_variance_df['Varianz'], s = j )\n ax.set_xlabel('Anzahl benutzter Werte')\n ax.set_ylabel('Varianz')\n\n if \"V163A\" in how_many_AND_variance_df.index:\n ax.scatter(how_many_AND_variance_df['Anzahl benutzter Werte']['V163A'],how_many_AND_variance_df['Varianz']['V163A'], c='red')\n ax.set_title(f'für {j} Mutationen')\n\n scatter_plots.append(scatter_plot)\nsaved_plots = []\n\nfor scatter_plot in scatter_plots:\n fig.canvas.draw()\n plot_image = np.array(fig.canvas.renderer.buffer_rgba())\n saved_plots.append(plot_image)\n\nplt.close(fig)\n#plot saved in variable\n\n#checkpoint 2\nprint('variances plots finished (2)')\n\n#goal: variances per mutation (mean over all mutcounts)\nframe_zum_mitteln_variance = pd.DataFrame(index = all_possible_mutations)\nvariance_per_mutant_count_list = []\n\n# IMPORTANT: only mutants with a mutcount from 2 to 8 are considered for calculation -> boxplot (see output_variance_analysis.ipynb)\nfor j, ax in zip(range(2, 8), axes.flatten()):\n variance_per_mutant_list = []\n\n for i in all_possible_mutations:\n mut_count_fscore = count_fscore_frame.loc[result_how_often[i] == True]\n fscore_mut = mut_count_fscore['DMS_score'].loc[mut_count_fscore['mut_count'] == j]\n varianz_mut = fscore_mut.var() #variance per mutation per mutcount\n variance_per_mutant_list.append(varianz_mut) #list of variances per mutation per mutcount of all mutations\n\n variance_per_mutant_df = pd.DataFrame(variance_per_mutant_list, index=all_possible_mutations)\n variance_per_mutant_count_list.append(variance_per_mutant_df)\nvariance_per_mutant_count_df = pd.concat(variance_per_mutant_count_list, axis=1)\nvariance_per_mutant_count_df.set_axis(range(2,8), axis=1, inplace=True)\n# -> variance_per_mutant_count_df (per mutation per mutant count)\nmean_variances_per_mutations = pd.DataFrame(variance_per_mutant_count_df.mean(axis=1, skipna=True), columns=['Mean'])\n# mean_variances_per_mutations (all variances per all mutations (rows) per all counts (columns))\n\n#goal: dataframe with the mean fitness scores for each mutcount (-> weights)\nmean_fitness_scores = 
pd.DataFrame(index = range(2,16), columns = [\"mean_fitness_score\"])\nfor i in range(2,16):\n fscore_mutcount_mean = count_fscore_frame[\"DMS_score\"].loc[count_fscore_frame[\"mut_count\"] == i].mean()\n mean_fitness_scores.loc[i, \"mean_fitness_score\"] = fscore_mutcount_mean\n#->mean_fitness_scores (weights)\n\n#goal: dataframe that contains the number of mutants a mutation is part of (how often does a mutation X appear in the dataset = occurrence)\nhow_many_per_mutant_count_list = []\n\nfor j, ax in zip(range(2, 8), axes.flatten()):\n how_many_for_variance = []\n\n for i in all_possible_mutations:\n mut_count_fscore = count_fscore_frame.loc[result_how_often[i] == True]\n fscore_mut = mut_count_fscore['DMS_score'].loc[mut_count_fscore['mut_count'] == j]\n wie_viel_jeweils = len(fscore_mut)\n how_many_for_variance.append(wie_viel_jeweils)\n\n how_many_per_mutant_df = pd.DataFrame(how_many_for_variance, index=all_possible_mutations)\n how_many_per_mutant_count_list.append(how_many_per_mutant_df)\nhow_many_per_mutant_count_df = pd.concat(how_many_per_mutant_count_list, axis=1)\nhow_many_per_mutant_count_df.set_axis(range(2,8), axis=1, inplace=True)\n\nmean_how_many_per_mutations = pd.DataFrame(how_many_per_mutant_count_df.mean(axis=1, skipna=True), columns=['Mean'])\n\n#goal: WEIGHTED DIFFERENCE: weighted_fscore_mean from the mutants WITH the mutation - weighted_fscore_mean from the mutants WITHOUT the mutation\n\n#first: WITH\n\n# Create an empty dataframe to store the results\nmean_for_differences_with_neu = pd.DataFrame(index=all_possible_mutations, columns=range(2, 16))\n\nfor mutation in all_possible_mutations:\n for mutation_count in range(2, 16):\n\n index_when_mut_present_weighted = result_how_often.loc[result_how_often[mutation] == True].index\n only_rows_with_mut_weighted = nur_fscore_mut_count[(nur_fscore_mut_count['mut_count'] == mutation_count) & (nur_fscore_mut_count.index.isin(index_when_mut_present_weighted))]\n # Calculate the mean of fitness score for the filtered rows\n mean_fitness_with_mut_score = only_rows_with_mut_weighted['DMS_score'].mean()\n mean_for_differences_with_neu.loc[mutation, mutation_count] = mean_fitness_with_mut_score\n\nweighted_means_WITH = pd.Series(index=mean_for_differences_with_neu.index)\n\nfor mutation in mean_for_differences_with_neu.index:\n row_WITH = mean_for_differences_with_neu.loc[mutation]\n non_nan_values_WITH = row_WITH.dropna()\n\n if len(non_nan_values_WITH) > 0:\n non_nan_weights_WITH = mean_fitness_scores.loc[non_nan_values_WITH.index]['mean_fitness_score']\n weighted_means_WITH[mutation] = np.average(non_nan_values_WITH, weights=non_nan_weights_WITH)\n\nweighted_means_df_WITH = pd.DataFrame({'Weighted Mean WITH': weighted_means_WITH})\n\n#NEXT: same for WITHOUT\n\n\n","repo_name":"datascience-mobi-2023/topic02_team03","sub_path":"Epistasis_Analysis/Rankings/python files/weighted_rankings.py","file_name":"weighted_rankings.py","file_ext":"py","file_size_in_byte":9884,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"30858813034","text":"from sqlalchemy import func\n\nfrom model import Decade, Country, Book, User, connect_to_db, db\nfrom server import app\n\ndef load_decades():\n Decade.query.delete()\n\n for row in open(\"sql_data/decades\"):\n decade = row.rstrip()\n new_decade = Decade(decade=decade)\n db.session.add(new_decade)\n\n db.session.commit()\n\n\ndef load_countries():\n Country.query.delete()\n\n for row in open(\"sql_data/countries\"):\n country = row.rstrip()\n new_country = Country(country=country)\n db.session.add(new_country)\n\n db.session.commit()\n\n\ndef load_books():\n Book.query.delete()\n\n for row in open(\"sql_data/books\"):\n row = row.rstrip()\n title, author, pub_year, country, decade, word_set, bigram_dict = row.split(\"|\")\n\n new_book = Book(title=title, author=author, pub_year=pub_year, country=country, decade=decade,\n word_set=word_set, bigram_dict=bigram_dict)\n db.session.add(new_book)\n\n db.session.commit()\n\n\nif __name__ == \"__main__\":\n connect_to_db(app)\n\n # In case tables haven't been created, create them\n db.create_all()\n\n # Import different types of data\n load_decades()\n load_countries()\n load_books()\n","repo_name":"MarissaSkud/Wordsworth","sub_path":"seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"}
+{"seq_id":"17519802342","text":"def start():\n print('This is my Elephant Mouse Cat game!')\n Player_One = 'Jheremy'\n Player_Two = 'Justin'\n\n def choices(Player_One_Choice, Player_Two_Choice):\n if Player_One_Choice == 'elephant' and Player_Two_Choice == 'mouse':\n return(' Mouse covers Elephant! ' + Player_Two + ' wins!')\n elif Player_One_Choice == 'mouse' and Player_Two_Choice == 'elephant':\n return('Paper covers Elephant! ' + Player_One + ' wins!')\n elif Player_One_Choice == 'cat' and Player_Two_Choice == 'mouse':\n return('Scissors cuts mouse ' + Player_One + ' wins!')\n elif Player_One_Choice == 'elephant' and Player_Two_Choice == 'cat':\n return('Rock smashes Scissors! ' + Player_One + ' wins!')\n elif Player_One_Choice == 'elephant' and Player_Two_Choice == 'cat':\n return('Scissors cuts Mouse! ' + Player_Two + ' wins!')\n elif Player_One_Choice == 'cat' and Player_Two_Choice == 'elephant':\n return('Rock smashes Cat! ' + Player_Two + ' wins!')\n elif Player_One_Choice == Player_Two_Choice:\n return('Jheremy and Justin tied!')\n else:\n return('Please type Elephant, Mouse or Cat!')\n Player_One_Choose = input('Does ' + Player_One +\n ' choose Elephant, Mouse or Cat? ').lower()\n Player_Two_Choose = input('Does ' + Player_Two +\n ' choose Elephant, Mouse or Cat? ').lower()\n print(choices(Player_One_Choose, Player_Two_Choose))\n\n def Play_Again():\n Again = input(' Would you like to play the game again? ' ).lower()\n if Again == 'No'.lower():\n quit()\n if Again == 'Yes'.lower():\n start()\n else:\n print('Please enter Yes or No. Thank You!')\n Play_Again()\n Play_Again()\nstart()\n \n \n","repo_name":"psalazar5/PythonWork","sub_path":"Rock_Paper_Scissors_Game.py","file_name":"Rock_Paper_Scissors_Game.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"23257389517","text":"import os, sys\nimport torch\nfrom torchvision import datasets, transforms\n\nimport numpy as np\n\nfrom args import args\n\nimport torch.utils.data as data_utils\nfrom sklearn.utils import shuffle\n\n\n\nclass MNIST:\n def __init__(self):\n super(MNIST, self).__init__()\n\n data_root = os.path.join(args.data, \"mnist\")\n\n use_cuda = torch.cuda.is_available()\n\n train_dataset = datasets.MNIST(\n data_root,\n train=True,\n download=True,\n transform=transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]\n ),\n )\n\n # Data loading code\n kwargs = {\"num_workers\": args.workers, \"pin_memory\": True} if use_cuda else {}\n self.train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs\n )\n self.val_loader = torch.utils.data.DataLoader(\n datasets.MNIST(\n data_root,\n train=False,\n download=True,\n transform=transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]\n ),\n ),\n batch_size=args.batch_size,\n shuffle=True,\n **kwargs\n )\n\n def update_task(self, i):\n return\n\n\nclass FashionMNIST:\n def __init__(self):\n super(FashionMNIST, self).__init__()\n\n data_root = os.path.join(args.data, \"fashionmnist\")\n\n use_cuda = torch.cuda.is_available()\n\n train_dataset = datasets.FashionMNIST(\n data_root,\n train=True,\n download=True,\n transform=transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]\n ),\n )\n\n # Data loading code\n kwargs = {\"num_workers\": args.workers, \"pin_memory\": True} if use_cuda else {}\n self.train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs\n )\n self.val_loader = torch.utils.data.DataLoader(\n datasets.MNIST(\n data_root,\n train=False,\n download=True,\n transform=transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]\n ),\n ),\n batch_size=args.batch_size,\n shuffle=True,\n **kwargs\n )\n\n def update_task(self, i):\n return\n\n\n\nclass Permute(object):\n def __call__(self, tensor):\n out = tensor.flatten()\n out = out[self.perm]\n return out.view(1, 28, 28)\n\n def __repr__(self):\n return self.__class__.__name__\n\nclass MNISTPerm:\n def __init__(self):\n super(MNISTPerm, self).__init__()\n\n data_root = os.path.join(args.data, \"mnist\")\n\n use_cuda = torch.cuda.is_available()\n\n self.permuter = Permute()\n\n train_dataset = datasets.MNIST(\n data_root,\n train=True,\n download=True,\n transform=transforms.Compose(\n [\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,)),\n self.permuter,\n ]\n ),\n )\n\n # Data loading code\n kwargs = {\"num_workers\": args.workers, \"pin_memory\": True} if use_cuda else {}\n self.train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs\n )\n self.val_loader = torch.utils.data.DataLoader(\n datasets.MNIST(\n data_root,\n train=False,\n transform=transforms.Compose(\n [\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,)),\n self.permuter,\n ]\n ),\n ),\n batch_size=args.test_batch_size,\n shuffle=True,\n **kwargs\n )\n\n def update_task(self, i):\n np.random.seed(i + args.seed)\n self.permuter.__setattr__(\"perm\", np.random.permutation(784))\n\nclass PMNIST:\n def __init__(self):\n super(PMNIST, self).__init__()\n\n seed = args.seed\n tasknum = args.num_tasks\n np.random.seed(seed)\n data = {}\n taskcla = []\n size = [1, 28, 28]\n # 
Pre-load\n # MNIST\n mean = torch.Tensor([0.1307])\n std = torch.Tensor([0.3081])\n dat = {}\n dat['train'] = datasets.MNIST(args.data, train=True, download=True)\n dat['test'] = datasets.MNIST(args.data, train=False, download=True)\n \n for i in range(tasknum):\n print(i, end=',')\n sys.stdout.flush()\n data[i] = {}\n data[i]['name'] = 'pmnist-{:d}'.format(i)\n data[i]['ncla'] = 10\n permutation = np.random.permutation(28*28)\n for s in ['train', 'test']:\n if s == 'train':\n arr = dat[s].train_data.view(dat[s].train_data.shape[0],-1).float()\n label = torch.LongTensor(dat[s].train_labels)\n else:\n arr = dat[s].test_data.view(dat[s].test_data.shape[0],-1).float()\n label = torch.LongTensor(dat[s].test_labels)\n \n arr = (arr/255 - mean) / std\n data[i][s]={}\n data[i][s]['x'] = arr[:,permutation].view(-1, size[0], size[1], size[2])\n data[i][s]['y'] = label\n \n # Validation\n for t in range(tasknum):\n data[t]['train']['dataset'] = data_utils.TensorDataset(data[t]['train']['x'], data[t]['train']['y'])\n data[t]['test']['dataset'] = data_utils.TensorDataset(data[t]['test']['x'], data[t]['test']['y'])\n\n # data[t]['valid'] = {}\n # data[t]['valid']['x'] = data[t]['train']['x'].clone()\n # data[t]['valid']['y'] = data[t]['train']['y'].clone()\n \n # r=np.arange(data[t]['train']['x'].size(0))\n # r=np.array(shuffle(r,random_state=seed),dtype=int)\n # nvalid=int(pc_valid*len(r))\n # ivalid=torch.LongTensor(r[:10000])\n # itrain=torch.LongTensor(r[10000:])\n # data[t]['valid']={}\n # data[t]['valid']['x']=data[t]['train']['x'][ivalid].clone()\n # data[t]['valid']['y']=data[t]['train']['y'][ivalid].clone()\n # data[t]['train']['x']=data[t]['train']['x'][itrain].clone()\n # data[t]['train']['y']=data[t]['train']['y'][itrain].clone()\n\n # Others\n n = 0\n for t in range(tasknum):\n taskcla.append((t, data[t]['ncla']))\n n += data[t]['ncla']\n data['ncla'] = n\n\n # Data loading code\n self.loaders = [\n (\n torch.utils.data.DataLoader(\n data[t]['train']['dataset'] , batch_size=args.batch_size, shuffle=True\n ),\n torch.utils.data.DataLoader(\n data[t]['train']['dataset'], batch_size=args.test_batch_size, shuffle=True\n ),\n torch.utils.data.DataLoader(\n data[t]['test']['dataset'], batch_size=args.test_batch_size, shuffle=True\n ),\n )\n for t in range(tasknum)\n ]\n\n def update_task(self, i):\n self.train_loader = self.loaders[i][0]\n self.val_loader = self.loaders[i][1]\n self.test_loader = self.loaders[i][2]","repo_name":"NguyenTriQuan/SupSup-baseline","sub_path":"data/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":7721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"31749816265","text":"import pygame\nimport random\nimport math\n\nfrom pygame import mixer\n\npygame.init()\n\n#Create screen\nscreen = pygame.display.set_mode((800, 600))\npygame.display.set_caption(\"Space Invaders\")\n\n#Background\nbackground = pygame.image.load('background.png')\n\n#Player\nplayerImg = pygame.image.load('space-invaders.png')\nplayerX= 370\nplayerY = 480\nplayer_change=0\n\n#Enemy\nenemyImg = []\nenemyX = []\nenemyY = []\nenemy_vel=3\nenemy_change_X =[]\nenemy_change_Y = []\nn_enemies = 6\n\nfor i in range(n_enemies):\n\tenemyImg.append(pygame.image.load('alien.png'))\n\tenemyX.append(random.randint(0,735))\n\tenemyY.append(50)\n\tenemy_change_X.append(enemy_vel)\n\tenemy_change_Y.append(40)\n\n#Bullet\nbulletImg = pygame.image.load('bullet.png')\nbulletX= 0\nbulletY = 480\nbullet_change_X=0\nbullet_change_Y = 20\nbullet_state=\"ready\"\n\n#Score\nscore_value = 0\nfont = pygame.font.Font('freesansbold.ttf', 32)\ntextX=10\ntextY=10\n\n#GameOver\ngo_font = pygame.font.Font('freesansbold.ttf', 64)\ndef game_over_text():\n\tg_over = go_font.render(\"GAME OVER\", True, (255, 255, 255))\n\tscreen.blit(g_over, (200,250))\n\ndef show_score(x,y):\n\tscore = font.render(\"Score: \" + str(score_value), True, (255, 255, 255))\n\tscreen.blit(score, (x,y))\n\ndef player(x, y):\n\tscreen.blit(playerImg, (x,y))\n\ndef enemy(x, y, i):\n\tscreen.blit(enemyImg[i], (x,y))\n\ndef fire_bullet(x,y):\n\tglobal bullet_state\n\tbullet_state =\"fire\"\n\tscreen.blit(bulletImg, (x + 16, y + 10))\n\ndef isCollision(enemyX, enemyY, bulletX, bulletY):\n\tdistance = math.sqrt(((enemyX-bulletX)**2 )+ ((enemyY-bulletY)**2))\n\tif distance <=27:\n\t\treturn True\n\telse:\n\t\treturn False\n\n#Game loop\nrunning = True\n\nwhile running:\n\tscreen.fill((0,0,0))\n\tscreen.blit(background, (0,0))\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\trunning=False\n\t\tif event.type == pygame.KEYDOWN:\n\t\t\tif event.key == pygame.K_LEFT:\n\t\t\t\tplayer_change = -4\n\t\t\tif event.key == pygame.K_RIGHT:\n\t\t\t\tplayer_change = 4\n\t\t\tif event.key == pygame.K_SPACE:\n\t\t\t\tif bullet_state == \"ready\":\n\t\t\t\t\tbullet_sound = mixer.Sound(\"laser.wav\")\n\t\t\t\t\tbullet_sound.play()\n\t\t\t\t\tbulletX = playerX\n\t\t\t\t\tfire_bullet(playerX, bulletY)\n\t\tif event.type == pygame.KEYUP:\n\t\t\tif event.key == pygame.K_LEFT or event.key ==pygame.K_RIGHT:\n\t\t\t\tplayer_change=0\n\t#Boundaries for player\t\t\t\n\tplayerX+=player_change\n\tif playerX<=0:\n\t\tplayerX=0\n\telif playerX >=736:\n\t\tplayerX=736\n\n\t#Enemy movement\n\tfor i in range(n_enemies):\n\t\tif enemyY[i]>440:\n\t\t\tfor j in range(n_enemies):\n\t\t\t\tenemyY[j]=2000\n\t\t\tgame_over_text()\n\t\t\tbreak\n\t\tenemyX[i]+=enemy_change_X[i]\n\t\tif enemyX[i]<=0:\n\t\t\tenemy_change_X[i]=enemy_vel\n\t\t\tenemyY[i]+=enemy_change_Y[i]\n\t\telif enemyX[i] >=736:\n\t\t\tenemy_change_X[i]=-enemy_vel\n\t\t\tenemyY[i]+=enemy_change_Y[i]\n\t\t#Collision\n\t\tcollision= isCollision(enemyX[i], enemyY[i], bulletX, bulletY)\n\t\tif collision:\n\t\t\texplosion = mixer.Sound(\"explosion.wav\")\n\t\t\texplosion.play()\n\t\t\tbulletY=480\n\t\t\tbullet_state=\"ready\"\n\t\t\tscore_value+=2\n\t\t\tenemy_vel += 0.1\n\t\t\tenemyX[i]= random.randint(0,735)\n\t\t\tenemyY[i] = 50\t\t\n\t\tenemy(enemyX[i], enemyY[i], i)\n\n\t#Bullet movement \n\tif bulletY <=0:\n\t\tbulletY = 480\n\t\tbullet_state=\"ready\"\n\tif bullet_state == \"fire\":\n\t\tfire_bullet(bulletX, bulletY)\n\t\tbulletY -= 
bullet_change_Y\n\n\t\n\tplayer(playerX, playerY)\n\tshow_score(textX, textY)\n\tpygame.display.update()","repo_name":"Panchofdez/Minigames","sub_path":"SpaceInvaders/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"36986767538","text":"import base64\nimport errno\nimport json\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport jsonschema\nimport yaml\n\nfrom dotty_dict import Dotty\nfrom getgauge.python import Messages, before_scenario, before_step, data_store, step\n\nfrom jsonschema import validate\n\nTEST_EXECUTION_FOLDER = \"./../test_execution\"\nWINDOWS_LINE_ENDING = b'\\r\\n'\nUNIX_LINE_ENDING = b'\\n'\n\n@step(\"Fill data store with kind \")\ndef fill_data_store_with_kind(kind):\n data_store.scenario.kind = kind\n\n@step(\"Fill data store with kind and case \")\ndef fill_data_store_with_kind_and_case(kind, case):\n data_store.scenario.kind = kind\n data_store.scenario.case = case.lower()\n\n@step(\"Fill data store with chart and suites \")\ndef fill_data_store_with_chart_and_suites(chart, suites):\n data_store.scenario.chart = chart\n s = list()\n if not suites == \"\":\n s = suites.split(',')\n s.append(\"basic\")\n data_store.scenario.suites = s\n\n@step(\"Fill data store with case , chart and suites \")\ndef fill_data_store_with_case_chart_suites(case, chart, suites):\n data_store.scenario.case = case.lower()\n data_store.scenario.chart = chart\n s = list()\n if not suites == \"\":\n s = suites.split(',')\n s.append(\"basic\")\n data_store.scenario.suites = s\n\n@step(\"Fill data store with kind , case , chart and suites \")\ndef fill_data_store(kind, case, chart, suites):\n fill_data_store_with_kind_and_case(kind, case)\n fill_data_store_with_chart_and_suites(chart, suites)\n \n@step(\"Copy folders to test execution folder\")\ndef copy_folders_to_TEST_EXECUTION_FOLDER():\n copy_the_test_suites_source_folders_to_TEST_EXECUTION_FOLDER()\n copy_the_test_source_folder_to_TEST_EXECUTION_FOLDER()\n copy_the_test_chart_folders_to(data_store.scenario.case, data_store.scenario.chart)\n copy_the_hull_chart_files_to_test_execution_folder()\n\n@step(\"Copy the test source folder to test execution folder\")\ndef copy_the_test_source_folder_to_TEST_EXECUTION_FOLDER():\n copy_the_test_chart_folders_to(data_store.scenario.case, data_store.scenario.chart)\n\n@step(\"Copy the test source folder for case and chart to test execution folder\")\ndef copy_the_test_chart_folders_to(case, chart):\n dir_path = os.path.dirname(os.path.realpath(__file__))\n src_path_case = os.path.join(dir_path,'./../sources/cases', case)\n src_path_chart = os.path.join(dir_path,'./../sources/charts', chart)\n dst_path = os.path.join(dir_path, TEST_EXECUTION_FOLDER, 'case', case, 'chart', chart)\n try:\n copytree(src_path_case, dst_path)\n copytree(src_path_chart, dst_path)\n except Exception as e:\n print(\"Oops!\", e.__str__, \"occurred.\")\n assert False\n assert True\n\n@step(\"Copy the suites source folders to test execution folder\")\ndef copy_the_test_suites_source_folders_to_TEST_EXECUTION_FOLDER():\n for suite in data_store.scenario.suites:\n copy_the_suite_source_folder_for_case_and_chart_and_suite_to_TEST_EXECUTION_FOLDER(data_store.scenario.case, data_store.scenario.chart, suite) \n\n@step(\"Copy the suite source folder for case and chart and suite to test execution folder\")\ndef copy_the_suite_source_folder_for_case_and_chart_and_suite_to_TEST_EXECUTION_FOLDER(case, chart, suite):\n dir_path = os.path.dirname(os.path.realpath(__file__))\n src_path = os.path.join(dir_path,'./../sources/cases/',suite, suite + '.values.hull.yaml')\n dst_path = os.path.join(dir_path, TEST_EXECUTION_FOLDER, 'case', case, 'chart', chart)\n try:\n with open(src_path, 'r') as file:\n data = file.read()\n data = 
data.replace(\"\",case)\n if not os.path.isdir(dst_path):\n os.makedirs(dst_path)\n dst_file = open(os.path.join(dst_path, suite + \".values.hull.yaml\"), \"w\")\n dst_file.write(data)\n dst_file.close()\n \n except Exception as e:\n print(\"Oops!\", e.__str__, \"occurred.\")\n assert False\n assert True\n\n\n@step(\"Clean the test execution folder\")\ndef delete_the_TEST_EXECUTION_FOLDER():\n dir_path = os.path.dirname(os.path.realpath(__file__))\n dst_path = os.path.join(dir_path, TEST_EXECUTION_FOLDER, \"case\", data_store.scenario.case)\n if os.path.isdir(dst_path):\n shutil.rmtree(dst_path, ignore_errors=True)\n assert True \n\n@step(\"Copy the HULL chart files to test execution folder\")\ndef copy_the_hull_chart_files_to_test_execution_folder():\n copy_the_hull_chart_files_to_test_object_in_chart(data_store.scenario.case, data_store.scenario.chart)\n\n@step(\"Copy the HULL chart files for test case and chart to test execution folder\")\ndef copy_the_hull_chart_files_to_test_object_in_chart(case, chart):\n dir_path = os.path.dirname(os.path.realpath(__file__))\n hull_path = os.path.join(dir_path,'./../../../../')\n dst_path = os.path.join(dir_path, TEST_EXECUTION_FOLDER, 'case', case, 'chart', chart, \"charts/hull-1.0.0/\")\n try:\n copyfile(hull_path, \"Chart.yaml\", dst_path)\n copyfile(hull_path, \"README.md\", dst_path)\n copyfile(hull_path, \"values.schema.json\", dst_path)\n copyfile(hull_path, \"values.yaml\", dst_path)\n copyfile(hull_path, \"hull.yaml\", os.path.join(dir_path, TEST_EXECUTION_FOLDER, 'case', case, 'chart', chart, \"templates\"))\n copytree(os.path.join(hull_path, \"templates\"), os.path.join(dst_path, \"templates\"))\n except Exception as e:\n print(\"Oops!\", e.__str__, \"occurred.\")\n assert False\n assert True \n\n@step(\"Fail to render the templates for values file to test execution folder because error contains \")\ndef fail_to_render_the_templates_for_values_file_to_TEST_EXECUTION_FOLDER_because_error_contains(values_file, expected_error):\n fail_to_render_the_templates_for_test_case_and_chart_and_values_file(data_store.scenario.case, data_store.scenario.chart, values_file, expected_error)\n\n@step(\"Fail to render the templates for test case and chart and values file to test execution folder because error contains \")\ndef fail_to_render_the_templates_for_test_case_and_chart_and_values_file(case, chart, values_file, expected_error):\n result = render_chart(case, chart, values_file)\n if result.returncode != 0 and expected_error in str(result.stdout):\n assert True\n else:\n assert False, \"With ExitCode \" + str(result.returncode) + \", expected error \" + expected_error + \" not found in STDOUT: \" + str(result.stdout)\n\n@step(\"Render the templates for values file to test execution folder\")\ndef render_the_templates_for_values_file_to_TEST_EXECUTION_FOLDER(values_file):\n result = render_chart(data_store.scenario.case, data_store.scenario.chart, values_file)\n #if result.returncode == 0:\n # render_path = get_render_path(data_store.scenario.case, data_store.scenario.chart, values_file)\n # with open(render_path) as reader:\n # for line in reader.readlines():\n # Messages.write_message(line)\n assert result.returncode == 0, \"With ExitCode \" + str(result.returncode) + \" STDOUT was:\\n\\n\" + str(result.stdout) + \"\\n\\n and STDERR\\n\\n: \" + str(result.stderr)\n\n\n\n@step(\"Fill data store with rendered objects\")\ndef fill_data_store_with_rendered_objects():\n get_objects(data_store.scenario.case, data_store.scenario.chart)\n\n@step(\"Expected 
<count> number of objects of kind <kind> were rendered\")\ndef check_that_expected_number_of_objects_of_kind_was_rendered(count, kind):\n found = len(data_store.scenario[\"objects_\" + kind])\n assert int(count) == found, \"Expected \" + str(count) + \" but found \" + str(found)\n\n@step(\"Expected <count> number of objects were rendered\")\ndef check_that_expected_number_of_objects_was_rendered(count):\n check_that_expected_number_of_objects_of_kind_was_rendered(count, data_store.scenario.kind)\n\n@step(\"Set test object to <name> of kind <kind>\")\ndef set_test_object_to_of_kind(name, kind):\n data_store.scenario.test_object = data_store.scenario[\"objects_\" + kind][name] \n\n@step(\"Set test object to <name>\")\ndef set_test_object_to(name):\n data_store.scenario.test_object = data_store.scenario[\"objects_\" + data_store.scenario.kind][name] \n\n@step(\"Test Object has key <key> with array value that has <value> items\")\ndef test_object_has_key_with_array_value_that_has_items(key, value):\n assert data_store.scenario.test_object != None\n if isinstance(data_store.scenario.test_object[key], list): \n assert_values_equal(len(data_store.scenario.test_object[key]), int(value), key)\n else:\n assert False\n\n@step(\"Test Object has key <key> with value <value>\")\ndef test_object_has_key_with_value(key, value):\n assert \"test_object\" in data_store.scenario, \"No Test Object set!\"\n assert data_store.scenario.test_object != None, \"Test Object set to None!\"\n assert_values_equal(data_store.scenario.test_object[key], value, key)\n\n@step(\"Test Object has key <key> set to true\")\ndef test_object_has_key_set_to_true(key):\n test_object_has_key_with_value(key, True)\n\n@step(\"Test Object has key <key> set to false\")\ndef test_object_has_key_set_to_false(key):\n test_object_has_key_with_value(key, False)\n\n\n@step(\"Test Object does not have key <key>\")\ndef test_object_does_not_have_key(key):\n assert \"test_object\" in data_store.scenario, \"No Test Object set!\"\n assert data_store.scenario.test_object != None, \"Test Object set to None!\"\n try:\n data_store.scenario.test_object[key] \n except Exception as e: \n if e.__class__.__name__ == 'KeyError':\n assert True\n return\n assert False\n\n\n\n@step(\"Test Object has key <key> with integer value <value>\")\ndef test_object_has_key_with_integer_value(key, value):\n assert \"test_object\" in data_store.scenario, \"No Test Object set!\"\n assert data_store.scenario.test_object != None, \"Test Object set to None!\"\n assert_values_equal(data_store.scenario.test_object[key], int(value), key)\n\n@step(\"Test Object has key <key> with Base64 encoded value of <value>\")\ndef test_object_has_key_with_base64_encoded_value(key, value):\n assert data_store.scenario.test_object != None\n decoded = base64.b64decode(data_store.scenario.test_object[key])\n assert_values_equal(\n value.encode('UTF-8').replace(WINDOWS_LINE_ENDING, UNIX_LINE_ENDING), \n decoded.replace(WINDOWS_LINE_ENDING, UNIX_LINE_ENDING), key)\n \n@step(\"Test Object has key <key> with value of key <scenario_key> from scenario data_store\")\ndef test_object_has_key_with_value_of_key_from_scenario_data_store(key, scenario_key):\n test_object_has_key_with_value(key, data_store.scenario[scenario_key])\n \n@step(\"All test objects have key <key> with value <value>\")\ndef all_test_objects_have_key_with_value(key, value):\n test_objects = data_store.scenario[\"objects_\" + data_store.scenario.kind]\n for i in test_objects:\n set_test_object_to(i)\n test_object_has_key_with_value(key, value) \n\n@step(\"All test objects have key <key> with value of key <scenario_key> from scenario data_store\")\ndef 
all_test_objects_have_key_with_value_of_key_from_data_store(key, scenario_key):\n test_objects = data_store.scenario[\"objects_\" + data_store.scenario.kind]\n for i in test_objects:\n set_test_object_to(i)\n test_object_has_key_with_value_of_key_from_scenario_data_store(key, scenario_key)\n\n@step(\"All test objects have key <key> with Base64 encoded value of <value>\")\ndef all_test_objects_have_key_with_base64_value(key, value):\n test_objects = data_store.scenario[\"objects_\" + data_store.scenario.kind]\n for i in test_objects:\n set_test_object_to(i)\n test_object_has_key_with_base64_encoded_value(key, value)\n\n@step(\"Validate\")\ndef validate():\n test_objects = data_store.scenario.objects\n for i in test_objects:\n validate_test_object_against_json_schema(i)\n\n@step(\"Validate test object against JSON Schema\")\ndef validate_test_object_against_json_schema(test_object):\n assert test_object != None \n validateJson(test_object)\n\n### non-steps\n\ndef assert_values_equal(actual, expected, key=None):\n assert expected == actual, \"For key '\"+ key + \"' there was expected value:\\n\\n\" + str(expected) + \"\\n\\nbut found:\\n\\n\" + str(actual) + \"\\n\\n\"\n\ndef validateJson(test_object):\n \n schema_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"./../schema\")\n schema_version_split = str(test_object[\"apiVersion\"]).split(\"/\")\n schema_version = schema_version_split[0].split(\".\")[0]\n if len(schema_version_split) > 1:\n schema_version = schema_version + \"-\" + schema_version_split[1]\n schema_file = os.path.join(schema_dir, str(test_object[\"kind\"]).lower().split(\".\")[0] + \"-\" + schema_version + \".json\")\n if not os.path.exists(schema_file):\n schema_file = os.path.join(schema_dir, str(test_object[\"kind\"]).lower().split(\".\")[0] + \"-core-\" + schema_version + \".json\")\n \n with open(schema_file) as json_file:\n schema = json.load(json_file)\n\n jsonschema.validate(instance=test_object.to_dict(), schema=schema)\n \ndef get_render_path(case, chart, values_file):\n dir_path = os.path.dirname(os.path.realpath(__file__))\n return os.path.join(dir_path, TEST_EXECUTION_FOLDER, 'case', case, 'rendered', chart, 'templates', 'hull.yaml')\n\ndef render_chart(case, chart, values_file):\n dir_path = os.path.dirname(os.path.realpath(__file__))\n chart_path = os.path.join(dir_path, TEST_EXECUTION_FOLDER, 'case', case, 'chart', chart)\n render_path = os.path.join(dir_path, TEST_EXECUTION_FOLDER, 'case', case, 'rendered')\n \n if not os.path.isdir(render_path):\n os.makedirs(render_path)\n \n suites = ()\n for suite in data_store.scenario.suites:\n suites += (\"-f\", os.path.join(chart_path, suite + \".values.hull.yaml\"))\n \n args = (\"helm\", \"template\", chart_path, \"--debug\", \"--output-dir\", render_path) + suites + (\"-f\", os.path.join(chart_path, values_file))\n \n popen = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n print('STDOUT:\\n', popen.stdout.decode(\"utf-8\").replace(\"\\n\",os.linesep))\n print('STDERR:\\n', popen.stderr.decode(\"utf-8\").replace(\"\\n\",os.linesep) if popen.stderr is not None else \"\")\n return popen\n\ndef copytree(src, dst, symlinks=False, ignore=None):\n if not os.path.exists(dst):\n os.makedirs(dst)\n for item in os.listdir(src):\n s = os.path.join(src, item)\n d = os.path.join(dst, item)\n if os.path.isdir(s):\n copytree(s, d, symlinks, ignore)\n else:\n if not os.path.exists(d) or os.stat(s).st_mtime - os.stat(d).st_mtime > 1:\n shutil.copy2(s, d)\n\ndef copyfile(src_dir, 
src_filename, dst_dir):\n if not os.path.isdir(dst_dir):\n os.makedirs(dst_dir)\n shutil.copyfile(os.path.join(src_dir, src_filename), os.path.join(dst_dir, src_filename))\n\ndef get_objects(case, chart):\n dir_path = os.path.dirname(os.path.realpath(__file__))\n rendered_file_path = os.path.join(dir_path, TEST_EXECUTION_FOLDER, 'case', case, 'rendered', chart, 'templates', 'hull.yaml')\n\n assert os.path.isfile(rendered_file_path)\n \n items = []\n with open(rendered_file_path) as file_in:\n \n item = None\n itemIndex = -1\n for line in file_in:\n if line.startswith(\"---\"): \n items.append([])\n items[itemIndex].append(line)\n \n data_store.scenario.objects = []\n for key in list(data_store.scenario.keys()):\n if key.startswith(\"objects_\"):\n data_store.scenario[key] = dict()\n \n for i in items:\n \n item = Dotty(yaml.safe_load(\"\".join(i)), separator='§')\n data_store.scenario.objects.append(item)\n if not (\"objects_\" + item['kind']) in data_store.scenario:\n data_store.scenario[\"objects_\" + item['kind']] = dict()\n data_store.scenario[\"objects_\" + item['kind']][item['metadata§name']] = item\n\n #with open(os.path.join(dir_path, \"./k8s_api_strict.json\")) as json_file:\n # schema = json.load(json_file)\n # data_store.scenario[\"schema\"] = schema\n","repo_name":"Munir-debug/hull","sub_path":"hull/files/test/HULL/step_impl/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":16374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"}
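In the Gauge steps of the record above, each angle-bracket placeholder in the step text binds positionally to a parameter of the decorated Python function. A minimal sketch of that binding, using a hypothetical spec line (not part of the HULL suite itself):

    from getgauge.python import step

    # Spec file line:  * Expected "3" number of objects of kind "Deployment" were rendered
    @step("Expected <count> number of objects of kind <kind> were rendered")
    def check_count(count, kind):
        # Gauge passes the quoted spec values in as strings, in placeholder order
        assert int(count) >= 0
        assert isinstance(kind, str)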
+{"seq_id":"35986991287","text":"from __future__ import print_function\n# just to be backward compatible with python 2, doesn't affect python 3\n\n\ndef introduction(info):\n text = print(\n \"Hello World, this is {name} with HNGi7 ID {hngi7_id} and email {email} using {language} for stage 2 task\".format(\n **my_info\n )\n )\n\n return text\n\n\nmy_info = {\n \"name\": \"Arbaaz Khan\",\n \"hngi7_id\": \"HNG-00574\",\n \"email\": \"crispyzingy@gmail.com\",\n \"language\": \"Python\",\n}\n\nintroduction(my_info)\n","repo_name":"lollykrown/python-spawn-test","sub_path":"scripts/Arbaaz.py","file_name":"Arbaaz.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"70676995432","text":"import random\r\n\r\nMUTATION_RATE = 0.05\r\n\r\nclass BitIndividual:\r\n def __init__(self, n_bits):\r\n '''Crea un nuevo individuo con un número binario al azar\r\n '''\r\n self.n_bits = n_bits\r\n self.bits = bin(random.getrandbits(n_bits))[2:]\r\n if len(self.bits) < n_bits:\r\n self.bits = \"0\" * (n_bits - len(self.bits)) + self.bits\r\n\r\n def reproduce(self, another_bit_individual):\r\n '''Crea un nuevo individuo, a base de este y otro individuo, \r\n con mutaciones al azar\r\n '''\r\n child = BitIndividual(self.n_bits)\r\n\r\n division = random.randint(0, self.n_bits)\r\n child.bits = self.bits[:division] + another_bit_individual.bits[division:]\r\n\r\n mutated_bits = \"\"\r\n\r\n for i in range(self.n_bits):\r\n if random.random() < MUTATION_RATE:\r\n mutated_bits += \"0\" if child.bits[i] == \"1\" else \"1\"\r\n else:\r\n mutated_bits += child.bits[i]\r\n\r\n child.bits = mutated_bits\r\n\r\n return child\r\n","repo_name":"danno-s/genetic-algorithm","sub_path":"src/bit_individual.py","file_name":"bit_individual.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"7301748761","text":"import pandas as pd\nfrom dash import Dash, dcc, html, Input, Output\nimport json\nimport random\nfrom src.hex_fig import get_hex_fig, hex_point_groups\nfrom src.location_blurb import get_location_blurb\nfrom src.weather_blurb import get_weather_blurb\n\n\n# temporarily trimming dataset for testing\n# generate fake fire data per location\nlocations = json.load(open('data/locations.json'))\nmock_df = pd.DataFrame(\n [\n location + [random.random(), random.randint(2018, 2021)]\n for location in locations[::15]\n ],\n columns=['lat', 'lon', 'fire_score', 'year']\n)\n\n\napp = Dash(__name__)\nserver = app.server\napp.layout = html.Div([\n html.H2(\n id='click-receiver'\n ),\n\n html.H3(\n id='weather-blurb'\n ),\n\n dcc.Graph(\n figure=get_hex_fig(mock_df),\n id='fire-graph',\n className='bordered'\n )\n], style={'text-align': 'center'})\n\n\n# click callback example\n@app.callback(\n Output('click-receiver', 'children'),\n Output('click-receiver', 'location'),\n [Input('fire-graph', 'clickData')]\n)\ndef display_location(clickData):\n print(clickData)\n point_num = clickData['points'][0]['pointNumber']\n print(f'Data @ point {point_num}: {hex_point_groups[point_num]}')\n return get_location_blurb(clickData)\n\n\n@app.callback(\n Output('weather-blurb', 'children'),\n Input('click-receiver', 'location')\n)\ndef display_weather_blurb(location):\n return get_weather_blurb(location)\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n","repo_name":"PL3B3/dash_experiment","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"33528971602","text":"from torch import nn\nimport torch.nn.functional as F\n\nclass ResidualBlock(nn.Module):\n def __init__(self, in_features):\n super(ResidualBlock, self).__init__()\n\n self.conv_block = nn.Sequential(\n nn.ReflectionPad2d(1),\n nn.Conv2d(in_features, in_features, 3),\n nn.InstanceNorm2d(in_features),\n nn.ReLU(inplace=True),\n nn.ReflectionPad2d(1),\n nn.Conv2d(in_features, in_features, 3),\n nn.InstanceNorm2d(in_features))\n\n def forward(self, x):\n return x + self.conv_block(x)\n\nclass Generator(nn.Module):\n def __init__(self, n_residual_blocks=9):\n super(Generator, self).__init__()\n\n # Initial convolution block \n self.input_conv = nn.Sequential(\n nn.ReflectionPad2d(3),\n nn.Conv2d(3, 64, 7),\n nn.InstanceNorm2d(64),\n nn.ReLU(inplace=True))\n\n # Downsampling\n self.downsampling = nn.Sequential(\n nn.Conv2d(64, 128, 3, stride=2, padding=1),\n nn.InstanceNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 256, 3, stride=2, padding=1),\n nn.InstanceNorm2d(256),\n nn.ReLU(inplace=True))\n\n # Residual blocks\n self.res = []\n for _ in range(n_residual_blocks):\n self.res += [ResidualBlock(256)]\n self.residual_blocks = nn.Sequential(*self.res)\n\n # Upsampling\n self.upsampling = nn.Sequential(\n nn.ConvTranspose2d(256, 128, 3, stride=2, padding=1, output_padding=1),\n nn.InstanceNorm2d(128),\n nn.ReLU(inplace=True),\n\n nn.ConvTranspose2d(128, 64, 3, stride=2, padding=1, output_padding=1),\n nn.InstanceNorm2d(64),\n nn.ReLU(inplace=True))\n\n # Output layer\n self.output = nn.Sequential(\n nn.ReflectionPad2d(3),\n nn.Conv2d(64, 3, 7),\n nn.Tanh())\n\n def forward(self, x):\n x = self.input_conv(x)\n x = self.downsampling(x)\n x = self.residual_blocks(x)\n x = self.upsampling(x)\n x = self.output(x)\n return x\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n\n # A bunch of convolutions one after another\n self.features_extractor = nn.Sequential(\n nn.Conv2d(3, 64, 4, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Conv2d(64, 128, 4, stride=2, padding=1),\n nn.InstanceNorm2d(128), \n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Conv2d(128, 256, 4, stride=2, padding=1),\n nn.InstanceNorm2d(256), \n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Conv2d(256, 512, 4, padding=1),\n nn.InstanceNorm2d(512), \n nn.LeakyReLU(0.2, inplace=True))\n\n # FCN classification layer\n self.classificator = nn.Conv2d(512, 1, 4, padding=1)\n\n def forward(self, x):\n x = self.features_extractor(x)\n x = self.classificator(x)\n # Average pooling and flatten\n return F.avg_pool2d(x, x.size()[2:]).view(x.size()[0], -1)","repo_name":"sergeisoly/CycleGAN","sub_path":"nets.py","file_name":"nets.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"141272549","text":"import datetime\nimport pytz\n\nimport grpc\n\nfrom pytest import mark\nfrom google.protobuf.timestamp_pb2 import Timestamp\nfrom yandex.cloud.priv.loadtesting.v1 import tank_job_pb2 as job_message\nfrom yandex.cloud.priv.loadtesting.v1 import tank_job_service_pb2 as job_service\nfrom load.projects.cloud.loadtesting.db.tables import JobTable, JobStatus, AmmoTable, TankTable\nimport yandex.cloud.priv.loadtesting.v1.storage_pb2 as storage_message\n\n\n@mark.usefixtures(\n 'patch_iam_authenticate',\n 'patch_iam_authorize',\n 'patch_iam_get_token',\n 'patch_db_operation_add',\n 'patch_db_job_delete',\n)\n@mark.parametrize(('status', 'exp_operation'), [\n (JobStatus.RUNNING.value, grpc.StatusCode.FAILED_PRECONDITION),\n (JobStatus.PREPARING.value, grpc.StatusCode.FAILED_PRECONDITION),\n (JobStatus.AUTOSTOPPED.value, True),\n (JobStatus.FAILED.value, True),\n (JobStatus.CREATED.value, True),\n (JobStatus.STOPPED.value, True)]\n)\ndef test_delete_job(job_service_stub, patch_db_job_get, status, exp_operation):\n patch_db_job_get.return_value = JobTable(id='job_id', folder_id='folder_id', status=status)\n try:\n operation = job_service_stub.Delete(\n job_service.DeleteTankJobRequest(\n id='job_id'),\n metadata=(('authorization', 'Bearer bebearer'),))\n assert operation.description == 'Delete Job'\n if exp_operation is not True:\n assert False, 'We should never be here'\n except grpc.RpcError as error:\n assert error.code() == exp_operation\n\n\n@mark.usefixtures('patch_iam_authenticate', 'patch_iam_authorize', 'patch_iam_get_token')\ndef test_get_report(job_service_stub, patch_db_job_get):\n imbalance_time = datetime.datetime.utcfromtimestamp(1643657426)\n patch_db_job_get.return_value = JobTable(\n id='job_id', folder_id='folder_id',\n imbalance_point=12, imbalance_ts=imbalance_time)\n result = job_service_stub.GetReport(\n job_service.GetReportRequest(job_id='job_id'),\n metadata=(('authorization', 'Bearer bebearer'),)\n )\n assert result == job_message.TankReport(\n job_id='job_id',\n imbalance_point=12,\n imbalance_ts=int(imbalance_time.astimezone(pytz.utc).timestamp()),\n imbalance_at=Timestamp(seconds=1643657426)\n )\n\n\n@mark.parametrize(\n ('field_name', 'field_value'),\n [\n ('name', 'New name'),\n ('description', 'New description'),\n ('favorite', True),\n ('favorite', False),\n ('target_version', '1.2'),\n ('imbalance_ts', 1650311116),\n ('imbalance_point', 12),\n ('imbalance_point', 0)\n ]\n)\n@mark.usefixtures(\n 'patch_iam_authenticate', 'patch_iam_authorize', 'patch_iam_get_token',\n 'patch_db_operation_add',\n)\ndef test_update_job(job_service_stub, patch_db_job_get, patch_db_job_add, field_name, field_value):\n job = JobTable(id='job_id', folder_id='folder_id')\n patch_db_job_get.return_value = job\n patch_db_job_add.return_value = job\n update_request = job_service.UpdateTankJobRequest(id='job_id')\n update_request.__setattr__(field_name, field_value)\n\n result = job_service_stub.Update(update_request, metadata=(('authorization', 'Bearer bebearer'),))\n assert result.done is True\n\n\n@mark.usefixtures(\n 'patch_iam_authenticate', 'patch_iam_authorize', 'patch_iam_get_token',\n 'patch_db_operation_add',\n)\ndef test_update_job_imbalance_at(job_service_stub, patch_db_job_get, patch_db_job_add):\n job = JobTable(id='job_id', folder_id='folder_id')\n patch_db_job_get.return_value = job\n patch_db_job_add.return_value = job\n update_request = job_service.UpdateTankJobRequest(\n id='job_id',\n imbalance_at=Timestamp(seconds=1652885870, 
nanos=255387067),\n )\n result = job_service_stub.Update(update_request, metadata=(('authorization', 'Bearer bebearer'),))\n assert result.done is True\n\n\n@mark.usefixtures(\n 'patch_iam_authenticate', 'patch_iam_get_token',\n 'patch_db_job_add', 'patch_db_operation_add',\n 'patch_db_operation_update',\n)\ndef test_create_with_test_data(job_service_stub, patch_db_tank_get, patch_db_job_get, patch_iam_authorize,\n patch_db_ammo_get_by_name, patch_aws_check_access_to_file):\n patch_db_ammo_get_by_name.return_value = AmmoTable(s3_name='my_ammo', folder_id='folder')\n patch_db_job_get.return_value = JobTable(id='test_id')\n patch_db_tank_get.return_value = TankTable(\n folder_id='folder',\n id='agent_id'\n )\n operation = job_service_stub.Create(\n job_service.CreateTankJobRequest(\n folder_id='folder',\n name='req name',\n description='req dsc',\n generator=job_message.TankJob.Generator.PHANTOM,\n target_address='req_target',\n target_port=11,\n target_version='req version',\n tank_instance_id='agent_id',\n load_schedule={'load_type': 'RPS', 'load_profile': ['const(30, 30)']},\n test_data=storage_message.StorageObject(object_storage_filename='ammo', object_storage_bucket='bucket')\n ),\n metadata=(('authorization', 'Bearer bebearer'),))\n response = job_message.TankJob()\n operation.response.Unpack(response)\n assert 'my_ammo' in response.config\n patch_aws_check_access_to_file.assert_called()\n\n\n@mark.usefixtures(\n 'patch_iam_authenticate', 'patch_iam_get_token',\n 'patch_db_job_add', 'patch_db_operation_add',\n 'patch_db_operation_update',\n)\ndef test_create_with_test_data_config(job_service_stub, patch_db_tank_get, patch_db_job_get, patch_iam_authorize,\n patch_db_ammo_get_by_name, patch_aws_check_access_to_file):\n patch_db_ammo_get_by_name.return_value = AmmoTable(s3_name='my_ammo', folder_id='folder')\n patch_db_job_get.return_value = JobTable(id='test_id')\n patch_db_tank_get.return_value = TankTable(\n folder_id='folder',\n id='agent_id'\n )\n operation = job_service_stub.Create(\n job_service.CreateTankJobRequest(\n folder_id='folder',\n config=\"{\\\"phantom\\\": {\\\"enabled\\\": true, \\\"package\\\": \\\"yandextank.plugins.Phantom\\\", \\\"address\\\": \\\"req_target:11\\\", \\\n \\\"ammo_type\\\": \\\"phantom\\\", \\\"load_profile\\\": {\\\"load_type\\\": \\\"rps\\\", \\\"schedule\\\": \\\"const(30, 30)\\\"}, \\\"ssl\\\": false, \\\"uris\\\": []}, \\\n \\\"core\\\": {}, \\\"cloudloader\\\": {\\\"enabled\\\": true, \\\"package\\\": \\\"yandextank.plugins.CloudUploader\\\", \\\"job_name\\\": \\\"req name\\\", \\\n \\\"job_dsc\\\": \\\"req dsc\\\", \\\"ver\\\": \\\"req version\\\", \\\"api_address\\\": null}}\",\n tank_instance_id='agent_id',\n test_data=storage_message.StorageObject(object_storage_filename='ammo', object_storage_bucket='bucket')\n ),\n metadata=(('authorization', 'Bearer bebearer'),))\n response = job_message.TankJob()\n operation.response.Unpack(response)\n assert 'my_ammo' in response.config\n patch_aws_check_access_to_file.assert_called()\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"load/tests/api/private_v1/test_job_service.py","file_name":"test_job_service.py","file_ext":"py","file_size_in_byte":6990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"31218716301","text":"import scrapy\nimport re\nimport datetime\nfrom pyquery import PyQuery as pq\n\nclass ContractSpider(scrapy.spiders.Spider):\n\n name = \"contractspider\"\n code_url = r'https://etherscan.io/address/'\n logfile = 'log.txt'\n errorfile = 'errorlog.txt'\n pageinfo = {}\n\n headers = {\n \"user-agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36\"\n }\n\n def start_requests(self):\n yield scrapy.Request('https://etherscan.io/contractsVerified', headers=self.headers)\n\n def parse(self, response):\n dom = pq(response.body)\n profile = dom(\".container.profile\")(\".row\").eq(1) #\n regex = re.compile(r'A Total Of (\\d+) verified contract source codes found')\n totalInfo = profile.children(\".col-sm-6\").eq(0).children(\"span\").eq(0)\n m = regex.match(totalInfo.text())\n totalCount = 0\n if m:\n totalCount = m.group(1)\n lastHref = profile.children(\".col-sm-6\").eq(1)(\".btn.btn-default.btn-xs.logout\").eq(1).attr(\"href\")\n lastregex = re.compile(r'/contractsVerified/(\\d+)')\n m = lastregex.match(lastHref)\n pageSize = 0\n if m:\n pageSize = m.group(1)\n d = {\"totalCount\": totalCount, \"pageSize\": pageSize}\n for i in range(int(d[\"pageSize\"])):\n yield scrapy.Request('https://etherscan.io/contractsVerified' +'/'+ str(i+1), headers=self.headers,callback=self.parse_Page)\n # yield scrapy.Request('https://etherscan.io/address/0xe7f648ad1f726a7f81cc7101a3c3b18a94a1c3a9#code', headers=self.headers, callback=self.getContract)\n\n def getTable(self,tableNode):\n thead = tableNode(\"thead\")\n print(thead)\n heads = thead(\"th\")\n ls = list()\n for i in range(len(heads)):\n ls.append(heads.eq(i).text())\n print(ls)\n tbody = tableNode(\"tbody\")\n trs = tbody(\"tr\")\n table = list()\n table.append(ls)\n for i in range(len(trs)):\n tr = trs.eq(i)\n tds = tr(\"td\")\n item = list()\n for j in range(len(tds)):\n ele = tds.eq(j).text()\n item.append(ele)\n print(item)\n table.append(item)\n return table\n\n def parse_Page(self,response):\n dom = pq(response.body)\n tableNode = dom(\".profile.container\")(\".row\").eq(2)(\"table\")\n table = self.getTable(tableNode)\n contract = {}\n heads = table[0]\n for j in range(1, len(table)):\n for k in range(0, len(table[0])):\n contract[heads[k]] = table[j][k]\n if(len(contract['Address']) > 42):\n contract['Address'] = contract['Address'][:42]\n request = scrapy.Request(self.code_url + contract['Address'] + '#code', headers=self.headers, callback=self.getContract)\n yield request\n\n def getContractSourceCode(self, dom, contractAddr):\n try:\n source_code = dom(\"#dividcode\")(\"pre.js-sourcecopyarea\").html()\n # re_h = re.compile('?\\w+[^>]*>')\n # source_code = re_h.sub('', source_code)\n source_code.replace('?\\w+[^>]*>', '')\n source_code.replace(\">\", \">\").replace(\"<\", \"<\")\n source_code.replace('
', ' ')\n file_path = r'verified_contracts/' + contractAddr + \".sol\"\n\n print(file_path)\n out = open(file_path, \"wb+\")\n out.write(source_code.encode(\"utf-8\"))\n out.close()\n except Exception as e:\n print(e)\n\n def getContractAbi(self, dom, contractAddr):\n abi = dom(\"#dividcode\")(\"#js-copytextarea2\")\n if abi.text() is None or len(abi.text()) == 0:\n return None\n file_path = r'verified_contract_abis/' + contractAddr + \".abi\"\n\n print(file_path)\n out = open(file_path, \"w+\")\n out.write(abi.text())\n out.close()\n # print(abi.text())\n return abi.text()\n\n def getContractBin(self, dom, contractAddr):\n bin = dom(\"#dividcode\")(\"#verifiedbytecode2\")\n if bin.text() is None or len(bin.text()) == 0:\n return None\n file_path = r'verified_contract_bins/' + contractAddr + \".bin\"\n\n print(file_path)\n out = open(file_path, \"w+\")\n out.write(bin.text())\n out.close()\n # print(bin.text())\n return bin.text()\n\n def getContractConstructorParams(self, dom, contractAddr):\n pattern = re.compile('Constructor Arguments')\n if(len(pattern.findall(dom.text())) == 0):\n return None\n wordwraps = dom(\"pre.wordwrap\")\n if len(wordwraps) < 4:\n return None\n constructor = wordwraps.eq(2)\n if constructor.text() is None or len(constructor.text()) == 0:\n return None\n file_path = r'verified_contract_constructorparams/' + contractAddr + \".constructorparams\"\n print(file_path)\n out = open(file_path, \"w+\")\n txt = [constructor.text().split(\"-----Encoded View---------------\")[0]]\n txt.extend(constructor.text().split(\"-----Encoded View---------------\")[1].split(\"Arg\"))\n txt = \"\\n\".join(txt)\n out.write(txt)\n out.close()\n\n def getContractLibrary(self, dom, contractAddr):\n pattern = re.compile('Library Used')\n if(len(pattern.findall(dom.text())) == 0):\n return None\n number = 2\n pattern = re.compile('Constructor Arguments')\n if(len(pattern.findall(dom.text())) > 0):\n number = number +1\n wordwraps = dom(\"pre.wordwrap\")\n if len(wordwraps) < 4:\n return None\n library = wordwraps.eq(number)\n if library.text() is None or len(library.text()) == 0:\n return None\n file_path = r'verified_contract_libraryparams/' + contractAddr + \".libraryparams\"\n print(file_path)\n out = open(file_path, \"w+\")\n out.write(library.text())\n out.close()\n\n def getContract(self, response):\n print(response.url)\n Addr = (response.url).split('/')[-1]\n dom = pq(response.body)\n try:\n self.getContractSourceCode(dom, Addr)\n self.getContractAbi(dom, Addr)\n self.getContractBin(dom, Addr)\n self.getContractConstructorParams(dom, Addr)\n self.getContractLibrary(dom, Addr)\n with open(self.logfile,\"a+\") as f:\n f.write(str(datetime.datetime.now().strftime('%Y.%m.%d-%H:%M:%S'))+','+response.url+'\\n')\n except Exception as e:\n with open(self.errorfile,\"a+\") as f:\n f.write(str(datetime.datetime.now().strftime('%Y.%m.%d-%H:%M:%S'))+','+response.url+str(e)+'\\n')","repo_name":"ouerum/contract_downloader","sub_path":"contract_downloader/spiders/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":6752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"184202259","text":"import os\n\nfrom market.idx.yatf.test_envs.base_env import BaseEnv\nfrom market.idx.feeds.qparser.bin.executor.qparser import QParser\nfrom market.idx.datacamp.proto.api.UpdateTask_pb2 import PartnerStats\nfrom market.idx.datacamp.proto.errors.Explanation_pb2 import ExplanationBatch\n\nfrom market.pylibrary.pbufsn_utils import read_pbufsn\n\n\nclass QParserTestEnv(BaseEnv):\n def __init__(self, qparser_bin, yt_server=None, **resources):\n\n super(QParserTestEnv, self).__init__(**resources)\n\n if 'feed_cfg' in self.resources:\n self.resources['feed_cfg'].update_options({\n 'trace': {\n 'enable_offers_trace': True,\n 'offers_trace_dir': self.output_dir\n },\n 'fp_metadata': {\n 'enabled': True,\n 'filename': 'test_fp_meta',\n },\n })\n if 'logbroker' in self.resources['feed_cfg'].options:\n self.resources['feed_cfg'].options['logbroker']['bind_business_id_to_partition'] = True\n self.resources['feed_cfg'].options['logbroker']['writers_count'] = 1\n self.resources['feed_cfg'].options['logbroker']['enable_stocks_deduplication_by_ts'] = True\n if yt_server:\n assert 'basic_table' in self.resources\n assert 'service_table' in self.resources\n assert 'actual_service_table' in self.resources\n yt_options = {\n 'sas_proxy': yt_server.get_server(),\n 'vla_proxy': yt_server.get_server(),\n 'meta_proxy': yt_server.get_yt_client().config[\"proxy\"][\"url\"],\n 'token_path': yt_server.get_yt_client().config[\"token\"] or \"NO_TOKEN\",\n 'basic_offers_table': self.resources['basic_table'].table_path,\n 'service_offers_table': self.resources['service_table'].table_path,\n 'actual_service_offers_table': self.resources['actual_service_table'].table_path,\n 'max_inflight': 1,\n 'full_deduplication': True\n }\n if 'service_search_table' in self.resources:\n yt_options['search_service_offers_table'] = self.resources['service_search_table'].table_path\n if 'actual_service_search_table' in self.resources:\n yt_options['search_actual_service_offers_table'] = self.resources['actual_service_search_table'].table_path\n self.resources['feed_cfg'].update_options({'yt': yt_options})\n\n explanation_log_filename = self.feed_cfg.options.get('explanation_log', {}).get('filename')\n self.feed_errors_file = os.path.join(self.output_dir, explanation_log_filename) if explanation_log_filename else None\n\n parsing_stats_filename = self.feed_cfg.options.get('partner_stats', {}).get('filename')\n self.partner_stats_file = os.path.join(self.output_dir, parsing_stats_filename) if parsing_stats_filename else None\n\n self.qparser_bin = qparser_bin\n\n @property\n def feed_cfg(self):\n return self.resources['feed_cfg']\n\n @property\n def description(self):\n return 'qparser'\n\n @property\n def feed_parsing_errors(self):\n fatal_feed_errors = None\n if self.feed_errors_file and os.path.exists(self.feed_errors_file):\n explanation_batch = list(read_pbufsn(self.feed_errors_file, ExplanationBatch, 'EXPM'))\n if explanation_batch:\n fatal_feed_errors = explanation_batch[0].explanation\n return fatal_feed_errors\n\n @property\n def parsing_stats(self):\n stats = None\n if self.partner_stats_file and os.path.exists(self.partner_stats_file):\n stats = list(read_pbufsn(self.partner_stats_file, PartnerStats, 'PRST'))\n if not stats:\n stats = None # [] -> None\n return stats\n\n @property\n def partner_parsing_stats(self):\n if not self.parsing_stats:\n return None\n return self.parsing_stats[0]\n\n @property\n def feed_archive_link(self):\n return ''\n\n def execute(self):\n qparser = 
QParser(bin_path=self.qparser_bin, config_paths=[self.feed_cfg.path])\n retcode = qparser.run(\n feed_path=self.feed_cfg.feed_path,\n feed_info=self.feed_cfg.feed_info,\n output_dir=self.output_dir,\n feed_parsing_task_filepath=self.resources['feed_parsing_task_filepath'].path,\n shopdat_path=self.resources['shops_dat'].path if 'shops_dat' in self.resources else None\n )\n return retcode\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"market/GENERAL/test_env (10).py","file_name":"test_env (10).py","file_ext":"py","file_size_in_byte":4591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"71087725033","text":"import falcon\nfrom app.utils import log\nfrom app.config.config import SECRET_KEY\nimport jwt\n\nLOG = log.get_logger()\n\n\nclass AuthenticatedRoute(object):\n \"\"\"\n Middleware used to require authentication for all resources/routes\n except login.\n \"\"\"\n def process_request(self, req, res, resource=None):\n if req.path not in ['/v1/users/self/login']:\n if req.auth is not None:\n LOG.info('Authentication header present')\n u = jwt.decode(req.auth, SECRET_KEY, algorithms=['HS256'])\n if u:\n req.context['user'] = u\n else:\n raise falcon.HTTPUnauthorized('Invalid authorization token.')\n else:\n raise falcon.HTTPUnauthorized('Authorization token is required for this resource.')","repo_name":"igio/falcon-rest-starter-app","sub_path":"app/middleware/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"9914026503","text":"\"\"\"\nMerge two sorted linked lists and return it as a sorted list. \nThe list should be made by splicing together the nodes of the \nfirst two lists.\n\nExample 1:\nInput: l1 = [1,2,4], l2 = [1,3,4]\nOutput: [1,1,2,3,4,4]\n\nExample 2:\nInput: l1 = [], l2 = []\nOutput: []\n\nExample 3:\nInput: l1 = [], l2 = [0]\nOutput: [0]\n \nConstraints:\nThe number of nodes in both lists is in the range [0, 50].\n-100 <= Node.val <= 100\nBoth l1 and l2 are sorted in non-decreasing order.\n\"\"\"\n\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n # Option 1 - iterative:\n head = tail = ListNode()\n \n # while both linked lists have nodes to traverse, compare the values\n # and add smaller value to tail, moving to the next node in the\n # respective list\n while l1 and l2:\n if l1.val <= l2.val:\n # tail.next = ListNode(l1.val)\n # tacking on entire list seems to run faster than creating\n # new node\n tail.next = l1 \n l1 = l1.next\n else:\n # tail.next = ListNode(l2.val)\n tail.next = l2\n l2 = l2.next\n \n # update tail\n tail = tail.next\n \n # add remaining list to end of tail\n tail.next = l1 or l2\n \n return head.next\n \n# # Option 2 - recursive:\n# # if it is not the end of either linked list\n# if l1 and l2:\n# # return the smaller of the two nodes and continue traversing to\n# # next node in the respective list\n# if l1.val < l2.val:\n# return ListNode(l1.val, self.mergeTwoLists(l1.next, l2))\n# else:\n# return ListNode(l2.val, self.mergeTwoLists(l1, l2.next))\n \n# # return remaining list which has not reached the end\n# return l1 or l2","repo_name":"nyccowgirl/coding-challenges","sub_path":"leet/Python/mergesortedll.py","file_name":"mergesortedll.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"43230219729","text":"# coding:utf-8\nfrom django.conf.urls import patterns, include, url\nfrom . import views\n\nurlpatterns = [\n # url(r'^(?P\\d+)/$', views.detail, name='learn_detail'),\n # url(r'^(?P\\d+)/vote/$', views.vote, name='vote'),\n # url(r'^index/$', views.index, name='index'),\n url(r'^register/$', views.register, name='register'),\n url(r'^signin/$', views.signin, name='signin'),\n url(r'^signout/$', views.signout, name='signout'),\n\n url(r'^$', views.account, name='account'),\n url(r'^setting/$', views.setting, name='setting'),\n\n]","repo_name":"doxiaodong/mysite","sub_path":"apps/account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"11877992519","text":"import numpy as np\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom Actor_Critic import Actor_Critic\nfrom Discriminator import Discriminator, CodeQ\n# from Autoencoder import Encoder\n# from ActionEncoder import ActionEncoder\nfrom Memory import ShortMemory, LongMemory\nfrom util import *\n\nclass Agent():\n def __init__(self, expert_weights=None):\n if expert_weights is not None:\n pretrain_loss_function = CrossEntropyLoss(torch.as_tensor(expert_weights, dtype=torch.float, device=DEVICE))\n self.actor_critic = Actor_Critic(pretrain_loss_function).to(DEVICE)\n else:\n default_weights = np.ones(VOCAB_SIZE)\n self.actor_critic = Actor_Critic(CrossEntropyLoss(torch.as_tensor(default_weights, dtype=torch.float, device=DEVICE)).to(DEVICE)).to(DEVICE)\n self.discriminator = Discriminator().to(DEVICE)\n self.codeq = CodeQ().to(DEVICE)\n \n self.short_memory = ShortMemory(self.actor_critic, self.discriminator)\n self.long_memory = LongMemory()\n self.info_loss_function = CrossEntropyLoss()\n self.horizon_cnt = 0\n self.kl_coef = 0.1\n \n def get_action(self, state, code, test=False):\n '''\n IN:\n state: [STATE_SIZE,](torch.FloatTensor)\n code: single integer\n OUT:\n action: single integer\n log_prob: single float\n '''\n self.actor_critic.eval()\n code = to_onehot(code)\n if len(state.size()) < 2:\n state = state.unsqueeze(0)\n if len(code.size()) < 2:\n code = code.unsqueeze(0)\n action, log_prob = self.actor_critic.action_forward(state, code, test=test)\n action = action[0].cpu().numpy().item()\n log_prob = log_prob[0].cpu().numpy().item()\n return action, log_prob\n\n def store(self, s, a, c, d, next_s, log_prob):\n '''\n IN:\n s: [STATE_SIZE,](torch.FloatTensor)\n a: single integer\n c: single integer\n d: single boolean\n next_s: [STATE_SIZE,](torch.FloatTensor)\n log_prob: single float\n '''\n self.short_memory.append(s, a, c, d, log_prob)\n self.horizon_cnt += 1\n if d or self.horizon_cnt == HORIZON_THRESHOLD:\n self.horizon_cnt = 0\n self.short_memory.set_last_state(next_s)\n self.actor_critic.eval()\n self.discriminator.eval()\n self.short_memory.move_to_long_memory(self.long_memory)\n self.short_memory.flush()\n return self.long_memory.check_update()\n else:\n return False\n\n def discriminator_update(self, expert_states, expert_actions, expert_codes):\n self.discriminator.train()\n self.codeq.train()\n #shuffle expert trajectories\n expert_chunk_length = len(expert_states)\n expert_indice = np.arange(expert_chunk_length)\n np.random.shuffle(expert_indice)\n expert_states = expert_states[expert_indice]\n expert_actions = expert_actions[expert_indice]\n #shuffle agent trajectories\n agent_chunk_length = self.long_memory.count\n agent_indice = np.arange(agent_chunk_length)\n np.random.shuffle(agent_indice)\n agent_states = self.long_memory.states[agent_indice]\n agent_actions = self.long_memory.actions[agent_indice]\n half_batch_size = int(BATCH_SIZE/2)\n for i in range(min(expert_chunk_length//half_batch_size, agent_chunk_length//half_batch_size)):\n #agent\n batch_agent_states = torch.as_tensor(agent_states[i*half_batch_size:(i+1)*half_batch_size], dtype=torch.float, device=DEVICE)\n batch_agent_actions = torch.as_tensor(agent_actions[i*half_batch_size:(i+1)*half_batch_size], dtype=torch.float, device=DEVICE)\n #expert\n batch_expert_states = torch.as_tensor(expert_states[i*half_batch_size:(i+1)*half_batch_size], dtype=torch.float, device=DEVICE)\n batch_expert_actions = torch.as_tensor(expert_actions[i*half_batch_size:(i+1)*half_batch_size], 
dtype=torch.float, device=DEVICE)\n #to make same len\n min_length = min(len(batch_agent_states), len(batch_expert_states))\n batch_agent_states = batch_agent_states[:min_length]\n batch_agent_actions = batch_agent_actions[:min_length]\n batch_expert_states = batch_expert_states[:min_length]\n batch_expert_actions = batch_expert_actions[:min_length]\n assert len(batch_agent_states) == len(batch_expert_states)\n #concat\n batch_states = torch.cat((batch_agent_states, batch_expert_states), 0)\n batch_actions = torch.cat((batch_agent_actions, batch_expert_actions), 0)\n\n disc_loss = self.discriminator.calculate_wail_loss(batch_states, batch_actions)\n #self.kl_coef = max(0, self.kl_coef + KL_STEP*(kl - IC))\n print('d loss: ', disc_loss)\n self.discriminator.train_by_loss(disc_loss)\n\n\n def actor_critic_update(self, expert_states, expert_actions, expert_codes):\n self.actor_critic.train()\n #shuffle agent trajectories\n agent_chunk_length = self.long_memory.count\n indice = np.arange(agent_chunk_length)\n np.random.shuffle(indice)\n states = self.long_memory.states[indice]\n actions = self.long_memory.actions[indice]\n codes = self.long_memory.codes[indice]\n gaes = self.long_memory.gaes[indice]\n oracle_values = self.long_memory.oracle_values[indice]\n old_log_probs = self.long_memory.old_log_probs[indice]\n #shuffle expert trajectories\n expert_chunk_length = len(expert_states)\n expert_indice = np.arange(expert_chunk_length)\n np.random.shuffle(expert_indice)\n expert_states = expert_states[expert_indice]\n expert_actions = expert_actions[expert_indice]\n expert_codes = expert_codes[expert_indice]\n pretrain_loss_sum = 0\n for i in range(min(expert_chunk_length//BATCH_SIZE, agent_chunk_length//BATCH_SIZE)):\n #pretrain loss\n batch_expert_states = torch.as_tensor(expert_states[i*BATCH_SIZE:(i+1)*BATCH_SIZE], dtype=torch.float, device=DEVICE)\n batch_expert_actions = torch.as_tensor(expert_actions[i*BATCH_SIZE:(i+1)*BATCH_SIZE], dtype=torch.long, device=DEVICE)\n batch_expert_codes = expert_codes[i*BATCH_SIZE:(i+1)*BATCH_SIZE]\n pretrain_loss = self.actor_critic.pretrain_loss(batch_expert_states, batch_expert_actions, batch_expert_codes)\n pretrain_loss_sum += pretrain_loss.detach().cpu().numpy()\n #actor critic loss\n batch_states = torch.as_tensor(states[i*BATCH_SIZE:(i+1)*BATCH_SIZE], dtype=torch.float, device=DEVICE)\n batch_actions = torch.as_tensor(actions[i*BATCH_SIZE:(i+1)*BATCH_SIZE], dtype=torch.long, device=DEVICE)\n batch_codes = codes[i*BATCH_SIZE:(i+1)*BATCH_SIZE]\n batch_gaes = torch.as_tensor(gaes[i*BATCH_SIZE:(i+1)*BATCH_SIZE], dtype=torch.float, device=DEVICE)\n batch_oracle_values = torch.as_tensor(oracle_values[i*BATCH_SIZE:(i+1)*BATCH_SIZE], dtype=torch.float, device=DEVICE)\n batch_old_log_probs = torch.as_tensor(old_log_probs[i*BATCH_SIZE:(i+1)*BATCH_SIZE], dtype=torch.float, device=DEVICE)\n critic_loss = self.actor_critic.critic_loss(batch_states, batch_codes, batch_oracle_values)\n actor_loss = self.actor_critic.actor_loss(batch_states, batch_actions, batch_codes, batch_gaes, batch_old_log_probs)\n #info loss\n one_hot_codes = to_onehot(batch_codes)\n action_logits = self.actor_critic(batch_states, one_hot_codes)\n one_hot_action = gumbel_softmax(action_logits)\n code_out = self.codeq.onehot_forward(batch_states, one_hot_action)\n info_loss = self.info_loss_function(code_out, torch.as_tensor(batch_codes, dtype=torch.long, device=DEVICE))\n #integrated loss\n loss = PRETRAIN_COEF*pretrain_loss + ACTOR_COEF*actor_loss + CRITIC_COEF*critic_loss + 
INFO_COEF*info_loss\n print('a loss: ', actor_loss, end=' ')\n print('c loss: ', critic_loss, end=' ')\n print('p loss: ', pretrain_loss, end=' ')\n print('info loss: ', info_loss)\n self.actor_critic.train_by_loss(loss)\n return pretrain_loss_sum/(min(expert_chunk_length//BATCH_SIZE, agent_chunk_length//BATCH_SIZE))\n\n\n def update(self, expert_chunks, update_actor):\n '''\n updates the discriminator, and optionally the policy\n IN:\n expert_chunks: list of expert_chunk, length: PPO_STEP\n update_actor: boolean type flag which determines whether to update the actor or not\n '''\n pretrain_loss = 0\n for i in range(DISC_STEP):\n expert_chunk = expert_chunks[i]\n expert_states = expert_chunk['states']\n expert_actions = expert_chunk['actions'].reshape((-1,))\n expert_codes = expert_chunk['codes'].reshape((-1,))\n self.discriminator_update(expert_states, expert_actions, expert_codes)\n if update_actor:\n for i in range(PPO_STEP):\n expert_chunk = expert_chunks[i]\n expert_states = expert_chunk['states']\n expert_actions = expert_chunk['actions'].reshape((-1,))\n expert_codes = expert_chunk['codes'].reshape((-1,))\n pretrain_loss += self.actor_critic_update(expert_states, expert_actions, expert_codes)\n self.long_memory.flush()\n return pretrain_loss/PPO_STEP\n\n def pretrain(self, expert_chunk):\n self.actor_critic.train()\n expert_states = expert_chunk['states']\n expert_actions = expert_chunk['actions'].reshape((-1,))\n expert_codes = expert_chunk['codes'].reshape((-1,))\n expert_chunk_length = len(expert_states)\n expert_indice = np.arange(expert_chunk_length)\n np.random.shuffle(expert_indice)\n expert_states = expert_states[expert_indice]\n expert_actions = expert_actions[expert_indice]\n expert_codes = expert_codes[expert_indice]\n loss_sum = 0\n for i in range(expert_chunk_length//BATCH_SIZE):\n batch_expert_states = torch.as_tensor(expert_states[i*BATCH_SIZE:(i+1)*BATCH_SIZE], dtype=torch.long, device=DEVICE)\n batch_expert_actions = torch.as_tensor(expert_actions[i*BATCH_SIZE:(i+1)*BATCH_SIZE], dtype=torch.long, device=DEVICE)\n batch_expert_codes = expert_codes[i*BATCH_SIZE:(i+1)*BATCH_SIZE]\n pretrain_loss = self.actor_critic.pretrain_loss(batch_expert_states, batch_expert_actions, batch_expert_codes)\n self.actor_critic.pretrain_by_loss(pretrain_loss)\n loss_sum += pretrain_loss.detach().cpu().numpy()\n return loss_sum/(expert_chunk_length//BATCH_SIZE)\n\n def pretrain_save(self):\n torch.save(self.actor_critic.state_dict(), PRETRAIN_SAVEPATH)\n\n def pretrain_load(self):\n self.actor_critic.load_state_dict(torch.load(PRETRAIN_SAVEPATH, map_location=torch.device(DEVICE)))\n self.actor_critic.to(DEVICE)\n self.discriminator.load_state_dict(torch.load(MODEL_SAVEPATH + 'pretrain' + 'disc.pt', map_location=torch.device(DEVICE)))\n self.discriminator.to(DEVICE)\n self.codeq.load_state_dict(torch.load(MODEL_SAVEPATH + 'pretrain' + 'codeQ.pt', map_location=torch.device(DEVICE)))\n self.codeq.to(DEVICE)\n\n def save(self, epoch):\n epoch += 55000\n self.actor_critic.save(epoch)\n self.discriminator.save(epoch)\n self.codeq.save(epoch)\n\n def load(self, epoch):\n self.actor_critic.load_state_dict(torch.load(MODEL_SAVEPATH + str(epoch) + 'ac.pt', map_location=torch.device(DEVICE)))\n self.actor_critic.to(DEVICE)\n self.discriminator.load_state_dict(torch.load(MODEL_SAVEPATH + str(epoch) + 'disc.pt', map_location=torch.device(DEVICE)))\n self.discriminator.to(DEVICE)\n self.codeq.load_state_dict(torch.load(MODEL_SAVEPATH + str(epoch) + 'codeQ.pt', 
map_location=torch.device(DEVICE)))\n self.codeq.to(DEVICE)\n","repo_name":"sunghoonhong/CoG-TAIL","sub_path":"Agent.py","file_name":"Agent.py","file_ext":"py","file_size_in_byte":12174,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"16827923390","text":"import json\nimport os.path\n\nfrom sea.app import Sea\nfrom sea.config import ConfigAttribute, Config\n\n\ndef test_config_from_class():\n class Base(object):\n TEST_KEY = 'foo'\n TEST_VALUE = 'bar'\n\n class Test(Base):\n TESTING = True\n\n path = os.path.join(os.path.dirname(__file__), '..')\n app = Sea(os.path.abspath(path))\n app.config.from_object(Test)\n\n assert app.testing\n assert app.config['TEST_KEY'] == 'foo'\n assert 'TestConfig' not in app.config\n d = app.config.get_namespace('TEST_')\n assert 'key' in d\n assert 'value' in d\n d = app.config.get_namespace(\n 'TEST_', lowercase=False, trim_namespace=False)\n assert 'TEST_KEY' in d\n s = repr(app.config)\n assert ' 0.5:\n self.alarms.append(\"Perimeter breach detected at %s\" % datetime.datetime.strftime(\n datetime.datetime.now(),\n \"%Y-%m-%d %H:%M:%S\"))\n\n if len(self.alarms) > 0:\n return {\"alarms\": self.alarms}\n else:\n return {\"alarms\": 0}\n\n def reset_alarms(self):\n del self.alarms[:]\n return {\"msg\": \"All alarms are reset\"}\n","repo_name":"CiscoIOx/restsense","sub_path":"sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"}
+{"seq_id":"40835240034","text":"from endpoint import CurrentConditions\nfrom endpoint import SunPhase\nfrom endpoint import WeatherAlerts\nfrom endpoint import TenDayForecast\nfrom endpoint import HurricaneAdvisory\nfrom clear import clear_screen\nimport re\n\n\ndef get_zipcode():\n first = True\n while first or (len(user_input) != 5 and not re.search(r'^[0-9]{5}?$', user_input)):\n first = False\n user_input = input(\"Enter a 5 digit zipcode: \")\n return user_input\n\n\ndef main():\n\n print(\"Let's check the weather\")\n zipcode = get_zipcode()\n curr_weather = CurrentConditions(zipcode)\n sun_phase = SunPhase(zipcode)\n ten_day = TenDayForecast(zipcode)\n weather_alerts = WeatherAlerts(zipcode)\n h = HurricaneAdvisory()\n\n print(curr_weather.get_current_conditions())\n print(sun_phase.get_sun_phase())\n\n print(\"\\nWhat else would you like to know about {}?\".format(curr_weather.city_full_name))\n\n while True:\n user_input = input(\"\\n1) 10 day forcast\\n2) Weather Alerts\\n3) Hurricane Advisory\\n4) Quit\\n\")\n if user_input == '1':\n\n ten_day.get_ten_day_forcast()\n\n elif user_input == '2':\n\n weather_alerts.get_alerts()\n\n elif user_input == '3':\n\n h.get_hurricanes()\n elif user_input == '4':\n exit()\n\n\n\nif __name__ == \"__main__\":\n clear_screen()\n main()\n","repo_name":"emanonGarcia/weather_report","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"20911461822","text":"import copy\nimport logging\nimport os\nimport shutil\nimport zipfile\n\nfrom pathlib import Path\n\nfrom genson import SchemaBuilder\nfrom munch import munchify\nfrom tinydb import Query\n\nfrom fashion.segment import Segment\nfrom fashion.util import cd\nfrom fashion.xforms import matchTags\n\n\nclass Warehouse(object):\n '''Manage collection of segments.'''\n\n def __init__(self, dir, fallback=None):\n '''\n Constructor.\n\n :param Path dir: directory for segment subdirectories.\n :param Warehouse fallback: another Warehouse to check for missing segments.\n '''\n\n # Location of this Warehouse.\n self.dir = dir.absolute()\n\n # Another Warehouse 2nd in priority to this one.\n self.fallback = fallback\n\n # A cache of already loaded segments.\n self.segmentCache = {}\n\n def listSegments(self):\n '''\n List names of segments in this warehouse.\n\n :returns: a list of segment names in this warehouse.\n :rtype: list(string)\n '''\n # Return the named subdirectories.\n with cd(self.dir):\n return [d.name for d in self.dir.iterdir() if d.is_dir()]\n\n def loadSegment(self, segname, db, cache=None):\n '''\n Load a segment by name from this or fallback Warehouse.\n\n :param string segname: name of the segment to load.\n :returns: the loaded segment or None.\n :rtype: Segment\n '''\n if cache is None:\n cache = self.segmentCache\n\n # Try the cache first.\n if segname in cache:\n return cache[segname]\n \n # Next check a named subdirectory.\n segfn = self.dir / segname / \"segment.json\"\n seg = None\n if segfn.exists():\n if db.isVerbose():\n print(\"Loading segment {0}\".format(segname))\n seg = Segment.load(segfn)\n elif self.fallback is not None:\n # Try the fallback Warehouse if not found.\n seg = self.fallback.loadSegment(segname, db)\n\n # Update the cache.\n cache[segname] = seg\n Q = Query()\n\n # Make a note in the database.\n db.table('fashion.prime.segment').upsert(seg.properties, Q.name == segname)\n\n return seg\n\n def loadSegments(self, db):\n '''\n Load all segments in this and referenced warehouses.\n\n :returns: list of all Segment objects.\n :rtype: list(Segment)\n '''\n db.table('fashion.prime.segment').purge()\n return self.loadSegs(db, self.segmentCache)\n\n def loadSegs(self, db, cache):\n # Load all the segments in this Warehouse.\n self.segments = [self.loadSegment(segname, db)\n for segname in self.listSegments()]\n if self.fallback is not None:\n # Append the fallback Warehouse segments.\n self.segments.extend(self.fallback.loadSegs(db, cache))\n return self.segments\n\n def newSegment(self, segname, db):\n '''\n Create a new segment in this warehouse.\n\n :param string segname: name of the new segment.\n :returns: the new Segment object.\n :rtype: Segment\n '''\n if segname in self.listSegments():\n logging.error(\"segment {0} already exists\".format(segname))\n return\n segdir = self.dir / segname\n segdir.mkdir(parents=True, exist_ok=True)\n Segment.create(segdir, segname)\n self.loadSegment(segname, db)\n\n def exportSegment(self, segname, db):\n '''\n Export a segment to a zip file.\n\n :param string segname: name of segment to export.\n '''\n seg = self.loadSegment(segname, db)\n exportName = segname + \"_v\" + seg.properties.version + \".zip\"\n dirName = seg.absDirname.parent.resolve()\n with zipfile.ZipFile(exportName, mode='w') as zip:\n with cd(dirName):\n for root, _, files in os.walk(segname):\n if os.path.basename(root) != '__pycache__':\n for file in files:\n zip.write(os.path.join(root, file))\n\n def 
importSegment(self, zipfilename):\n '''\n Import a segment from a zip file.\n\n :param string zipfilename: filename of export.\n '''\n with zipfile.ZipFile(zipfilename, mode='r') as zip:\n with cd(self.dir):\n zip.extractall()\n\n def deleteSegment(self, segment):\n '''\n Delete a segment from this warehouse.\n\n :param Segment segment: the segment object to delete from this warehouse.\n '''\n shutil.rmtree(str(segment.absDirname))\n\n def getModuleDefinitions(self, dba, tags=None):\n '''\n Load all \"xformModules\" xform module defintions from all segments \n which match tags. Does NOT load the modules.\n\n :param list(string) tags: list of tags to match before loading.\n :returns: a dictionary of module definions.\n :rtype: dictionary {moduleName:module}\n '''\n modDefs = {}\n dba.table('fashion.prime.module.definition').purge()\n for seg in self.segments:\n xformModules = munchify(seg.findModuleDefinitions())\n for m in xformModules:\n if m.moduleName in modDefs:\n logging.error(\n \"xform module name collision: {0}\".format(m.moduleName))\n else:\n mod = munchify(m)\n if \"templatePath\" not in mod:\n if \"templatePath\" in seg.properties:\n mod.templatePath = seg.properties.templatePath\n else:\n mod.templatePath = []\n mod.absDirname = seg.absDirname.as_posix()\n mod.moduleRootName = m.moduleName\n mod.segmentName = seg.properties.name\n dba.table('fashion.prime.module.definition').insert(mod)\n modDefs[mod.moduleName] = mod\n return modDefs\n\n def getModuleConfigs(self, dba, moduleDict):\n '''\n Load all \"xformConfig\" xform module configurations from all segments \n for modules in moduleDict. Does NOT load the modules or initialize them.\n\n :param moduleDict: a dictionary of module definitions.\n :returns: a list of xform modules configurations.\n :rtype: list(xform module configs)\n '''\n cfgs = []\n dba.table('fashion.prime.module.config').purge()\n for seg in self.segments:\n for c in seg.properties.xformConfig:\n if c.moduleName in moduleDict:\n cfg = munchify(c)\n cfg.name = cfg.moduleName\n cfg.segmentName = seg.properties.name\n cfg.absDirname = seg.absDirname.as_posix()\n # set defaults for omitted properties\n if \"inputKinds\" not in cfg:\n cfg.inputKinds = []\n if \"outputKinds\" not in cfg:\n cfg.outputKinds = []\n if \"tags\" not in cfg:\n cfg.tags = []\n if \"templatePath\" not in cfg:\n if \"templatePath\" in seg.properties:\n cfg.templatePath = seg.properties.templatePath\n else:\n cfg.templatePath = []\n cfgs.append(cfg)\n dba.table('fashion.prime.module.config').insert(cfg)\n else:\n logging.error(\"No module for config: {0}\".format(c.moduleName))\n return cfgs\n\n def getUndefinedModuleConfigs(self, moduleDict):\n '''\n Load all \"xformConfig\" from all segments for modules NOT in moduleDict.\n\n :param moduleDict: a dictionary with keys of module names.\n :returns: a list of xform modules configurations.\n :rtype: list(xform module configs)\n '''\n cfgs = []\n for seg in self.segments:\n for cfg in seg.properties.xformConfig:\n if cfg.moduleName not in moduleDict:\n cfg.properties.name = cfg.properties.moduleName\n cfgs.append(cfg)\n return cfgs\n\n def getSchemaDefintions(self):\n '''\n Load all segment schemas.\n :returns: a dictionary of schemas for models by kind.\n :rtype: dictionary {string kind:string schema filename}\n '''\n schemaDescrs = {}\n for seg in self.segments:\n for sch in seg.properties.schema:\n if sch.kind in schemaDescrs:\n logging.error(\n \"duplicate schema definition: {0}\".format(sch.kind))\n else:\n sch.absDirname = 
seg.absDirname\n schemaDescrs[sch.kind] = sch\n return schemaDescrs\n\n def guessSchema(self, dba, kind, existingSchema=None):\n '''\n Guess a JSONSchema for a model kind from examples.\n\n :param DatabaseAccess dba: the fasion database to search.\n :param string kind: the model kind to guess.\n :param JSONobject existingSchema: starting schema, if any.\n :returns: True if the schema was guessed and created.\n :rtype: boolean\n '''\n objs = dba.table(kind).all()\n builder = SchemaBuilder()\n if existingSchema is not None:\n builder.add_schema(existingSchema)\n elif len(objs) == 0:\n logging.error(\n \"Can't guess with no schema and no examples of kind {0}\".format(kind))\n return False\n for o in objs:\n builder.add_object(o)\n schema = builder.to_schema()\n localSeg = self.loadSegment(\"local\", dba)\n localSeg.createSchema(kind, schema)\n return True\n","repo_name":"braddillman/fashion","sub_path":"fashion/warehouse.py","file_name":"warehouse.py","file_ext":"py","file_size_in_byte":9873,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
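guessSchema above leans on genson's SchemaBuilder, whose add_schema/add_object/to_schema calls are exactly the API used in the method. A self-contained sketch with made-up sample objects:

    from genson import SchemaBuilder

    builder = SchemaBuilder()
    builder.add_object({'name': 'widget', 'qty': 2})
    builder.add_object({'name': 'gadget', 'qty': 5, 'tags': ['new']})
    # to_schema() returns an object schema covering every example seen,
    # roughly: properties name/qty/tags, with the common keys marked required
    print(builder.to_schema())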
+{"seq_id":"11619470211","text":"#!/usr/bin/env python\n# -*- coding: iso-8859-15 -*-\nfrom django.conf.urls.defaults import patterns, include, url\n\n\nurlpatterns = patterns('',\n url(r'^$',\n 'glean.views.home',\n name='glean-home'),\n\n # GLEANERS\n url(r'^gleaners/$',\n 'glean.views.gleaners',\n name=\"glean-gleaners\"),\n\n\n # SEARCHES\n url(r'^search/create/$',\n 'glean.views.search_create',\n name='glean-search-create'),\n url(r'^search/gleaner/chooser/$',\n 'glean.views.gleaner_chooser',\n name='glean-gleaner-chooser'),\n url(r'^search/gleaner/form/(?P[^/]+)/$',\n 'glean.views.gleaner_form',\n name='glean-gleaner-form'),\n)\n","repo_name":"stringfellow/django-glean","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"38562862461","text":"from src import create_app, db\nfrom src.database.models import User, Watchlist\nimport os\napp = create_app()\n\nif __name__ == '__main__':\n # db.create_all()\n os.getcwd()\n app.run()\n\n@app.shell_context_processor\ndef make_shell_context():\n return {'db': db, 'User': User, 'Watchlist': Watchlist}\n","repo_name":"alcoccoque/watchmovie","sub_path":"wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"18211554620","text":"import random\nfrom random import shuffle\nimport HumanPlayer\nimport RandomPlayer\nimport AzulayPlayer\nimport GoobiPlayer\nimport HillelPlayer\nimport OfirPlayer\nimport HeuristicPlayer\nimport learningAgent\nimport ExpectiPlayer\nimport TScoringPlayer\nimport EndRacePlayer\nimport QLearningAgent\nimport ApproxQLearningAgent\nimport sys\n\nAPPROX_QLEARNING_STRING_REP = \"Approx Q Player\"\nAPPROX_QLEARNING_PLAYER_INDEX = '12'\nQLEARNING_PLAYER_STRING_REP = \"Q Player\"\nQLEARNING_PLAYER_INDEX = '11'\nEND_RACE_PLAYER_STRING_REP = \"End Race Player\"\nEND_RACE_PLAYER_INDEX = '10'\nTSCORING_PLAYER_STRING_REP = \"T Scoring Player\"\nTSCORING_PLAYER_INDEX = '9'\nEXPECTI_PLAYER_STRING_REP = \"Expecti Player\"\nEXPECTI_PLAYER_INDEX = '8'\nRANDOM_PLAYER_STRING_REP = \"Random Player\"\nRANDOM_PLAYER_INDEX = '7'\nHUMAN_PLAYER_STRING_REP = \"Human Player\"\nHUMAN_PLAYER_INDEX = '6'\nHILLEL_PLAYER_STRING_REP = \"Hillel Player\"\nHILLEL_PLAYER_INDEX = '5'\nOFIR_PLAYER_STRING_REP = \"Ofir Player\"\nOFIR_PLAYER_INDEX = '4'\nAZULAY_PLAYER_STRING_REP = \"Azulay Player\"\nAZULAY_PLAYER_INDEX = '3'\nHEURISTIC_PLAYER_STRING_REP = \"Heuristic Player\"\nHEURISTIC_PLAYER_INDEX = '2'\nLEARNING_PLAYER_INDEX = '1'\nLEARNING_PLAYER_STRING_REP = \"Learning Player\"\nGOOBI_PLAYER_INDEX = '0'\nGOOBI_PLAYER_STRING_REP = \"Goobi Player\"\nRESET_SCORE = 1\nMIN_DICE_VALUE = 1\nMAX_DICE_VALUE = 6\nFIRST_PLAYER = 0\nCONTINUE = 0\nGAME_OVER = 1\nGAME_OVER_MSG = \"game over. winner is: \"\nGOOBI_PLAYER = 0\nLEARNING_PLAYER = 1\nHEURISTIC_PLAYER = 2\nAZULAY_PLAYER = 3\nOFIR_PLAYER = 4\nHILLEL_PLAYER = 5\nHUMAN_PLAYER = 6\nRANDOM_PLAYER = 7\nEXPECTI_PLAYER = 8\n\n\n\"\"\"\nThis class defines a pig game representation.\nBy using this class we simulates a multiple games (as we desire) of pig game.\nThe object of the jeopardy dice game Pig is to be the first player to reach 100\npoints. Each player's turn consists of repeatedly rolling a die. After each\nroll, the player is faced with two choices:\n1)\troll again: . If the player rolls a 1, the player scores nothing and it\nbecomes the opponent's turn. . If the player rolls a number other than 1, the\n number is added to the player's turn total and the player's turn continues.\n2)\tHold: . If the player holds (he decline to roll again), the turn total, the\nsum of the rolls during the turn, is added to the player's score, and it\nbecomes the opponent's turn.\n\"\"\"\n\n\nclass PigGame(object):\n \"\"\"\n This is the constructor of the class.\n It receives a goal score and players types\n \"\"\"\n def __init__(self, goal_score, players):\n print_mode = int(sys.argv[3])\n if print_mode:\n # Print welcome messages in the beginning of the game only if\n # the player input print bit is on\n print()\n print(\"**********Welcome To Pig - Dice Game**********\")\n print()\n print(\" ,.\")\n print(\" (_|,.\")\n print(\" ,' /, )_______ _\")\n print(\" __j o``-' `.'-)'\")\n print(\" (') \\'\")\n print(\" `-j |\")\n print(\" `-._( /\")\n print(\" |_\\ |--^. 
/\")\n print(\" /_]'|_| /_)_/\")\n print(\" /_]' /_]'\")\n print()\n print(\"*******************Good Luck******************\")\n print()\n self.players = players\n self.max_score = goal_score\n self.current_player = players[FIRST_PLAYER]\n self.num_of_players = len(players)\n self.state = {}\n for i in range(self.num_of_players):\n self.state[players[i].get_index()] = 0\n if print_mode:\n print(\"The participating players are: \", end=\"\")\n for index, player in enumerate(players):\n if index != len(players) - 1:\n print(player.get_name() + \", \", end=\"\")\n else:\n print(player.get_name())\n print()\n\n \"\"\"\n This is a simple function to simulates a dice.\n This function returns number between 1 to 6 with uniform distribution\n \"\"\"\n @staticmethod\n def roll_dice():\n return random.randint(MIN_DICE_VALUE, MAX_DICE_VALUE)\n\n \"\"\"\n This function responsible for running a player turn.\n In each turn, the player is asked to roll the dice or end his turn.\n \"\"\"\n def play_turn(self, player):\n print_mode = int(sys.argv[3])\n if print_mode:\n print(\"current player: \", player.get_name())\n current_turn_score = 0\n # ask the player what will be his next move - roll the dice or end turn\n while player.play(self.state, current_turn_score):\n current_roll = self.roll_dice()\n if print_mode:\n print(str(current_roll) + \" rolled by \" + player.get_name())\n if current_roll != RESET_SCORE:\n current_turn_score += current_roll\n else:\n current_turn_score = 0\n break\n # after the player end his turn, sum his turn score to his overall\n # score and check if he reached the goal score (100)\n self.state[player.get_index()] += current_turn_score\n if self.state[player.get_index()] >= self.max_score:\n player.add_win()\n for player in self.players:\n player.change_game_stat()\n if print_mode:\n print(GAME_OVER_MSG + player.get_name())\n print()\n print(\"Final score: \")\n for player in self.players:\n print(player.get_name() + \": \" + str(\n self.state[player.get_index()]))\n return GAME_OVER\n return CONTINUE\n\n \"\"\"\n This function responsible for running the game. 
As long as the game is not\n    over, after each player's turn it changes the current player to the next\n    player.\n    \"\"\"\n    def run_game(self):\n        print_mode = int(sys.argv[3])\n        while self.play_turn(self.current_player) != GAME_OVER:\n            self.current_player = self.players[\n                (self.players.index(self.current_player) + 1) %\n                self.num_of_players]\n            if print_mode:\n                print(\"Current score - {\", end=\"\")\n                for index, player in enumerate(self.players):\n                    if index != len(self.players) - 1:\n                        print(player.get_name() + \": \" + str(\n                            self.state[player.get_index()]) + \", \", end=\"\")\n                    else:\n                        print(player.get_name() + \": \" + str(\n                            self.state[player.get_index()]) + \"}\")\n                print()\n\n        return max(self.state, key=self.state.get)\n\n\n\"\"\"\nThis function creates the player objects according to the user input values.\n\"\"\"\n\n\ndef get_players():\n    players = []\n    # get the user input which contains the players type values and\n    # create the player objects accordingly.\n    input_list = sys.argv[1].split(',')\n    for index, player in enumerate(input_list):\n        if player == GOOBI_PLAYER_INDEX:\n            goobi_player = GoobiPlayer.GoobiPlayer(\n                index, GOOBI_PLAYER_STRING_REP)\n            players.append(goobi_player)\n        elif player == LEARNING_PLAYER_INDEX:\n            learning_player = learningAgent.ValueIterationPlayer(\n                index, LEARNING_PLAYER_STRING_REP)\n            players.append(learning_player)\n        elif player == HEURISTIC_PLAYER_INDEX:\n            heuristic_player = HeuristicPlayer.HeuristicPlayer(\n                index, HEURISTIC_PLAYER_STRING_REP)\n            players.append(heuristic_player)\n        elif player == AZULAY_PLAYER_INDEX:\n            azulay_player = AzulayPlayer.AzulayPlayer(\n                index, AZULAY_PLAYER_STRING_REP)\n            players.append(azulay_player)\n        elif player == OFIR_PLAYER_INDEX:\n            ofir_player = OfirPlayer.OfirPlayer(\n                index, OFIR_PLAYER_STRING_REP)\n            players.append(ofir_player)\n        elif player == HILLEL_PLAYER_INDEX:\n            hillel_player = HillelPlayer.HillelPlayer(\n                index, HILLEL_PLAYER_STRING_REP)\n            players.append(hillel_player)\n        elif player == HUMAN_PLAYER_INDEX:\n            human_player = HumanPlayer.HumanPlayer(\n                index, HUMAN_PLAYER_STRING_REP)\n            players.append(human_player)\n        elif player == RANDOM_PLAYER_INDEX:\n            random_player = RandomPlayer.RandomPlayer(\n                index, RANDOM_PLAYER_STRING_REP)\n            players.append(random_player)\n        elif player == EXPECTI_PLAYER_INDEX:\n            expecti_player = ExpectiPlayer.ExpectiPlayer(\n                index, EXPECTI_PLAYER_STRING_REP)\n            players.append(expecti_player)\n        elif player == TSCORING_PLAYER_INDEX:\n            tscoring_player = TScoringPlayer.TScoringPlayer(\n                index, TSCORING_PLAYER_STRING_REP)\n            players.append(tscoring_player)\n        elif player == END_RACE_PLAYER_INDEX:\n            end_race_player = EndRacePlayer.EndRacePlayer(\n                index, END_RACE_PLAYER_STRING_REP)\n            players.append(end_race_player)\n        elif player == QLEARNING_PLAYER_INDEX:\n            q_player = QLearningAgent.QLearningAgent(\n                index, QLEARNING_PLAYER_STRING_REP)\n            players.append(q_player)\n        elif player == APPROX_QLEARNING_PLAYER_INDEX:\n            approx_q_player = ApproxQLearningAgent.ApproxQLearningAgent(\n                index, APPROX_QLEARNING_STRING_REP)\n            players.append(approx_q_player)\n\n    return players\n\n\"\"\"\nThis is the main function, which gets the user input values and creates\na pig game object according to the input of the user.\n\"\"\"\n\n\ndef main():\n    # Check that the user entered a valid number of input values and notify\n    # the user if he didn't.\n    if len(sys.argv) != 5:\n        print(\"Invalid Input\\nUsage: \"\n              \"<players> <shuffle_mode> <print_mode> <number_of_games>\\n\")\n        print(\"Players Indexes: \")\n        print(\"0 - Goobi player\\n1 - Learning player\\n2 - Heuristic player\\n\"\n              \"3 
- Azulay player\\n4 - Ofir player\\n5 - Hillel player\\n\"\n \"6 - Human player\\n7 - Random player\\n8 - Expecti player\\n\"\n \"9 - TScoring Player\\n10 - End Race player\\n 11 - Q Player\\n\"\n \"12 - Approx Q Player\")\n return\n # get the input values\n players = get_players()\n number_of_games = int(sys.argv[4])\n shuffle_mode = sys.argv[2]\n for i in range(number_of_games):\n if i % 10000 == 0 and i != 0:\n print(i)\n if int(sys.argv[2]):\n shuffle(players)\n # Create a pig game object and run the game\n pig = PigGame(100, players)\n pig.run_game()\n\n print()\n print(\"Game results: \")\n if int(shuffle_mode):\n print(\"Shuffle mode is on\")\n print()\n # sort the player's scores and print it.\n players.sort(key=lambda x: x.get_wins(), reverse=True)\n for index, player in enumerate(players):\n print(player.get_name() + \" won \" + str(player.get_wins()) + \" games \"\n + \"(\" + str(100 * player.get_wins() / number_of_games) + \"%)\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"gabrielbh/PIG-game","sub_path":"PigGame.py","file_name":"PigGame.py","file_ext":"py","file_size_in_byte":11423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"31170548779","text":"\"\"\"\nx_raw = input(\"Enter a Number: \")\nx = int(x_raw)\nif x < 2 :\n print('Below 2')\nelif x >= 2 :\n print('Two or More')\nelse :\n print('Big Fish')\n\"\"\"\n\"\"\"\nx_raw = input(\"Enter a number: \")\nx = int(x_raw)\n\nif x < 2 :\n print(\"Below 2\")\nelif x < 20 :\n print(\"Below 20\")\nelif x < 10 :\n print(\"Below 10\")\nelse :\n print(\"Something Else\")\n\"\"\"\n\"\"\"\nx = \"My Name is Jeff\"\ntry:\n i = int(x)\nexcept:\n i = -404\nprint(\"Hi\", i)\n\"\"\"\n\"\"\"\nastr = '123x'\ntry:\n aint = int(astr)\nexcept:\n aint = -304\n\nprint(\"Yo!\", aint)\n\"\"\"\n\"\"\"\nastr = \"Bob\"\ntry:\n print(\"Hello!\")\n aint = int(astr)\n print(\"There\")\nexcept:\n aint = -204\nprint(\"Done\", aint)\n\"\"\"\n\"\"\"\nrawstr = input(\"Enter a number: \")\ntry:\n ival = int(rawstr)\nexcept:\n ival = -1\n\nif ival > 0 :\n print('Thanks')\nelse:\n print(\"Not a Number\", ival)\n\"\"\"\n\"\"\"\nx =6\nif x == 6 :\n print('Is 6')\n print('Is Still 6')\n print('Third 6')\n\"\"\"\n\"\"\"\nx = 0\nif x < 2 :\n print('Small')\nelif x < 10 :\n print('Medium')\nelse :\n print('LARGE')\nprint('All done')\n\"\"\"\n\"\"\"\nx = 2.0\nif x < 2 :\n print('Below 2')\nelif x >= 2 :\n print('Two or more')\nelse :\n print('Something else')\n\"\"\"\n\"\"\"\nraw_hours = input(\"Enter Hours:\")\nraw_rate = input(\"Enter Rate:\")\n\ntry:\n hours = float(raw_hours)\n rate = float(raw_rate)\nexcept:\n print(\"Error! Enter Valid Number\")\n quit()\n\nif hours <= 40:\n print(hours * rate)\nelif hours > 40:\n print((40 * rate) + ((hours - 40) * (rate * 1.5)))\nelse:\n print(\"Error\")\n\"\"\"\nraw_input = input(\"Enter Score Grade: \")\n\ntry:\n input = float(raw_input)\nexcept:\n print(\"Error! Enter Valid Score\")\n quit()\n\nif input < 0.6:\n print(\"F\")\nelif 0.60 <= input <= 0.69:\n print(\"D\")\nelif 0.70 <= input <= 0.79:\n print(\"C\")\nelif 0.80 <= input <= 0.89:\n print(\"B\")\nelif 0.90 <= input <= 1.0:\n print(\"A\")\nelse:\n print(\"Please Enter A Score between 0 to 1\")\n","repo_name":"Code5linger/Python-Workbench","sub_path":"Coursera - Programming for Everybody/Week 5/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"19137148513","text":"def solution(numbers):\n numbers = sorted(numbers);\n compareNumber = [0,1,2,3,4,5,6,7,8,9];\n result = set(compareNumber) - set(numbers); \n answer = 0;\n for i in result:\n answer += i;\n return answer\n\nnumbers = 0;\n\nif 0 <= numbers <=9:\n solution(numbers=[1,2,3,4,6,7,8,0]); \n","repo_name":"NICKmop/CODINGTEST_python","sub_path":"LEVEL1/NoneNumberPlus.py","file_name":"NoneNumberPlus.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"70186901654","text":"#!/usr/bin/python3\nimport subprocess as sp\nfrom sys import argv\nfrom datetime import datetime\nfrom requests import get\nimport socket\n\n## Get hosts via arguments or prompt ##\ndef get_hosts(): # Returns host dictionary\n ## Variables ## \n hosts = {}\n http = '://'\n colon = ':'\n slash = '/'\n ## Get host argument ## \n if len(argv) > 1:\n host_string = argv[1].lower().replace(' ','') \n else: host_string = input(\"Enter IP address or ranges: \").lower().replace(' ','')\n ## Isolate scheme ## \n if http in host_string: \n http_index = host_string.find(http)\n hosts['scheme'] = host_string[:http_index+len(http)]\n host_string = host_string[http_index+len(http):]\n else:\n hosts['scheme'] = None\n #print(f\"****Isolate Scheme: \\nHosts['scheme'] = {str(hosts['scheme'])} \\nHost_string = {str(host_string)}\")\n ## Isolate path ##\n if slash in host_string:\n slash_index = host_string.find(slash)\n hosts['path'] = host_string[slash_index:]\n host_string = host_string[:slash_index]\n else:\n hosts['path'] = None\n #print(f\"****Isolate Path: \\nHosts['path'] = {str(hosts['path'])} \\nHost_string = {str(host_string)}\")\n ## Isolate Port ##\n if colon in host_string:\n colon_index = host_string.find(colon)\n hosts['port'] = host_string[colon_index+1:]\n host_string = host_string[:colon_index]\n else:\n hosts['port'] = None\n #print(f\"****Isolate Port: \\nHosts['port'] = {str(hosts['port'])} \\nHost_string = {str(host_string)}\")\n ## Isolate domain ##\n hosts['domain'] = host_string\n #print(f\"****Isolate Domain: \\nHosts['domain'] = {str(hosts['domain'])} \\nHost_string = {str(host_string)}\")\n ## Return dictionary ## \n return hosts\n\n## Get ports via arguments or prompt ##\ndef get_ports(): # Returns list of ports\n if len(argv) > 2:\n ports = argv[2].lower().replace(' ','')\n else: ports = input('Enter ports e.g. \"80,443\": ').lower().replace(' ', '')\n ports = ports.split(\",\") \n if host_dict['port'] != None and host_dict['port'] not in ports:\n ports.append(host_dict['port'])\n return ports\n\n## Allow port to be app name as well as port number ##\n# (item) is port or app name from user input. Will be vaiable 'ports'\ndef common_port_lookup(item):\n apps = {\n \"http\":\"80,443\",\n \"https\":\"80,443\",\n \"ssh\":\"22\",\n \"dns\":\"53\",\n \"smb\":\"445\",\n \"ftp\":\"20,21\",\n \"smtp\":\"25\",\n \"imap\":\"143,993\",\n \"pop\":\"109,110,995\",\n \"snmp\":\"161,l62\",\n \"rdp\":\"3389\",\n \"vnc\":\"5800,5900\",\n \"ldap\":\"389\"\n }\n if item in apps:\n return apps[item] # if user entered an app, returns the port value\n else:\n return item # if port # was entered it keeps it\n\n## Run fuctions to define host and port variables ##\nhost_dict = get_hosts()\nports_list = get_ports()\nhosts = host_dict['domain']\nports = ','.join(ports_list)\n\n#print(f\"hosts = {hosts}\")\n#print(f\"ports = {ports}\")\n\n## Common port lookup ##\n# Turns ports into an array and checks each. 
Then back to comma delimited string.\nports = \",\".join(map(common_port_lookup,ports.split(\",\")))\n\n## Getting user's internal and external IPs ##\ndef get_internal_ip(dest_ip): \n    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    s.connect((dest_ip, 80))\n    ip = (s.getsockname()[0])\n    s.close()\n    return ip\n\ndef get_external_ip(dest_ip):\n    public_ip = get('https://api.ipify.org').text \n    return public_ip\n\n## Running NMAP request ##\n# Sets NMAP port flags, if ports exist\ndef nmap_run(ports):\n    # run nmap once: with -p when ports were supplied, without it otherwise\n    if ports == '':\n        options = sp.run(['nmap', hosts, '-Pn'], capture_output=True, text=True)\n    else:\n        options = sp.run(['nmap', hosts, '-p', ports, '-Pn'], capture_output=True, text=True)\n    return options\n\n# Running NMAP and defining variables\nprint(\"Please wait while running scan ...\")\nresult = nmap_run(ports) # Runs nmap and stores result\nresultlist = result.stdout.split('\\n') # Make nmap output a list\n# Inject better \"Starting\" text\ndateandtime = datetime.now().astimezone().strftime(\"%m-%d-%Y %H:%M %p (%Z)\")\nresultlist[0] = f\"\\nStarting at {dateandtime}\" \n\n# Find destination IP in nmap string\ndef nmap_find_ip(string): # Gets the IP at the end of an nmap string\n    dest_ip = string.split(' ')[-1].strip('()')\n    return dest_ip\n\n# Check if IP is private. Returns True if in RFC 1918\ndef private_ip_check(ip):\n    octet = ip.split('.') # Turn IP into array of octets\n    octet = list(map(int, octet)) # Turn octets into integers \n    if octet[0] == 10:\n        return True\n    elif octet[0] == 172 and (15 < octet[1] < 32): # 172.16.0.0 - 172.31.255.255\n        return True\n    elif octet[0] == 192 and octet[1] == 168:\n        return True\n    else: return False\n\n# If private_ip_check True return int_ip. If False return ext_ip.\ndef int_or_ext_IP(ip):\n    if private_ip_check(ip) == True:\n        return get_internal_ip(ip)\n    elif private_ip_check(ip) == False:\n        return get_external_ip(ip)\n    else: print(\"Error: IP was somehow not public or private\")\n\n# Insert IP into Nmap output\ndef nmap_inject_ips(lines):\n    start_msg = \"Nmap scan report\"\n    outlist = [] \n    for line in lines:\n        if start_msg in line:\n            dest_ip = nmap_find_ip(line)\n            source_ip = int_or_ext_IP(dest_ip) \n            index = line.index(\"for\") # Nmap scan report for \n            my_from = \"from: \"\n            #if int_ip == \"VPN is not connected\" and private_ip_check(dest_ip):\n            #    my_from = \"ERROR: \"\n            # Nmap scan report (from ) for \n            ip_line = f\"{line[:index]}({my_from}{source_ip}) {line[index:]}\" \n            outlist.append(ip_line)\n        else: outlist.append(line)\n    return outlist # Return modified nmap output\n\n# outlist is equal to the modified nmap output\noutlist = nmap_inject_ips(resultlist)\nprint('\\n'.join(outlist)) # Print list\n","repo_name":"devious21sf/zmap","sub_path":"zmap.py","file_name":"zmap.py","file_ext":"py","file_size_in_byte":5856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"70158149335","text":"import cv2 as cv\r\nimport numpy as np\r\n\r\ndef match_and_draw_key_points(img1=None, name1='', img2=None, name2='', feature_detector_name='sift',\r\n feature_descriptor_name='sift', top_match=50, order=0):\r\n \"\"\"\r\n\r\n :param img1:\r\n :param name1:\r\n :param img2:\r\n :param name2:\r\n :param detector:\r\n :param top_match:\r\n :return:\r\n \"\"\"\r\n descriptor_img1 = None\r\n descriptor_img2 = None\r\n keypoints_img1 = None\r\n keypoints_img2 = None\r\n\r\n if feature_detector_name is None or feature_descriptor_name is None:\r\n raise ValueError('Feature Detector or Descriptor can\\t be none!')\r\n\r\n # feature detector\r\n feature_detector = None\r\n if feature_detector_name == 'sift':\r\n feature_detector = cv.xfeatures2d.SIFT_create()\r\n elif feature_detector_name == 'surf':\r\n feature_detector = cv.xfeatures2d.SURF_create()\r\n if feature_detector is None:\r\n raise ValueError('Feature Detector can\\'t be none')\r\n\r\n # feature descriptor\r\n feature_descriptor = None\r\n if feature_descriptor_name == 'sift':\r\n feature_descriptor = cv.xfeatures2d.SIFT_create()\r\n elif feature_descriptor_name == 'surf':\r\n feature_descriptor = cv.xfeatures2d.SURF_create()\r\n elif feature_descriptor_name == 'freak':\r\n feature_descriptor = cv.xfeatures2d.FREAK_create()\r\n\r\n if feature_descriptor is None:\r\n raise ValueError('Feature Descriptor can\\'t be none')\r\n\r\n # Detector\r\n if feature_descriptor_name == 'surf':\r\n # keypoints and descriptors\r\n keypoints_img1, descriptor_img1 = feature_descriptor.detectAndCompute(img1, None)\r\n keypoints_img2, descriptor_img2 = feature_descriptor.detectAndCompute(img2, None)\r\n\r\n if feature_descriptor_name == 'freak':\r\n kp1 = feature_detector.detect(img1, None)\r\n keypoints_img1, descriptor_img1 = feature_descriptor.compute(img1, kp1)\r\n\r\n kp2 = feature_detector.detect(img2, None)\r\n keypoints_img2, descriptor_img2 = feature_descriptor.compute(img2, kp2)\r\n\r\n #print(descriptor_img1.shape)\r\n #print(descriptor_img2.shape)\r\n\r\n # Brute Force Matcher\r\n bf = cv.BFMatcher()\r\n matches = bf.knnMatch(descriptor_img1, descriptor_img2, k=2)\r\n\r\n # Ratio Test for good matches\r\n good_matches = []\r\n for m, n in matches:\r\n if m.distance < 0.75 * n.distance:\r\n good_matches.append([m])\r\n\r\n # cv2.drawMatchesKnn expects list of lists as matches.\r\n img3 = cv.drawMatchesKnn(img1, keypoints_img1, img2, keypoints_img2, good_matches, None, flags=2)\r\n\r\n plt.figure()\r\n plt.imshow(img3)\r\n plt.title('{}, i\\'th building & j\\'th image {}, p\\'th building and q\\'th image {}.({})'.format(feature_detector_name+'_'+feature_descriptor_name, name1, name2, len(good_matches)))\r\n cv.imwrite('.\\\\2\\\\{}-{}, i\\'th building & j\\'th image {}, p\\'th building and q\\'th image {}.({}).jpg'.format(order, feature_detector_name+'_'+feature_descriptor_name, name1, name2, len(good_matches)), img3)\r\n\r\n\r\nif __name__ == '__main__':\r\n from read_dataset import read_dataset\r\n import matplotlib.pyplot as plt\r\n\r\n # read dataset from file\r\n images, img_family, images_train, training_family, images_test, test_family = read_dataset()\r\n\r\n # len\r\n print(\"dataset: \", len(images), len(img_family), len(images_train),\r\n len(training_family), len(images_test), len(test_family))\r\n\r\n order = 0\r\n\r\n # surf\r\n for i in [0, 5, 10]:\r\n for j in range(1, 5):\r\n match_and_draw_key_points(img1=images[i], name1='{}-{}'.format(i,0), img2=images[i+j],\r\n name2='{}-{}'.format(i,j), 
feature_detector_name='surf',\r\n feature_descriptor_name='surf', order=order)\r\n order = order + 1\r\n\r\n # ith building with other\r\n match_and_draw_key_points(img1=images[0], name1='{}-{}'.format(0, 0), img2=images[6],\r\n name2='{}-{}'.format(1, 1), feature_detector_name='surf',\r\n feature_descriptor_name='surf', order = order)\r\n order = order + 1\r\n\r\n match_and_draw_key_points(img1=images[0], name1='{}-{}'.format(0, 0), img2=images[14],\r\n name2='{}-{}'.format(2, 5), feature_detector_name='surf',\r\n feature_descriptor_name='surf', order = order)\r\n order = order + 1\r\n\r\n match_and_draw_key_points(img1=images[5], name1='{}-{}'.format(1, 0), img2=images[2],\r\n name2='{}-{}'.format(0, 3), feature_detector_name='surf',\r\n feature_descriptor_name='surf',order = order)\r\n order = order + 1\r\n\r\n match_and_draw_key_points(img1=images[5], name1='{}-{}'.format(1, 0), img2=images[12],\r\n name2='{}-{}'.format(3, 3), feature_detector_name='surf',\r\n feature_descriptor_name='surf',order = order)\r\n order = order + 1\r\n\r\n match_and_draw_key_points(img1=images[10], name1='{}-{}'.format(2, 0), img2=images[2],\r\n name2='{}-{}'.format(0, 3), feature_detector_name='surf',\r\n feature_descriptor_name='surf',order = order)\r\n order = order + 1\r\n\r\n match_and_draw_key_points(img1=images[10], name1='{}-{}'.format(2, 0), img2=images[8],\r\n name2='{}-{}'.format(1, 4), feature_detector_name='surf',\r\n feature_descriptor_name='surf',order = order)\r\n order = order + 1\r\n\r\n # freak\r\n for i in [0, 5, 10]:\r\n for j in range(1, 5):\r\n match_and_draw_key_points(img1=images[i], name1='{}-{}'.format(i,0), img2=images[i+j],\r\n name2='{}-{}'.format(i,j), feature_detector_name='surf',\r\n feature_descriptor_name='freak',order = order)\r\n order = order + 1\r\n\r\n # ith building with other\r\n match_and_draw_key_points(img1=images[0], name1='{}-{}'.format(0, 0), img2=images[6],\r\n name2='{}-{}'.format(1, 1), feature_detector_name='surf',\r\n feature_descriptor_name='freak',order = order)\r\n order = order + 1\r\n match_and_draw_key_points(img1=images[0], name1='{}-{}'.format(0, 0), img2=images[14],\r\n name2='{}-{}'.format(2, 5), feature_detector_name='surf',\r\n feature_descriptor_name='freak',order = order)\r\n order = order + 1\r\n match_and_draw_key_points(img1=images[5], name1='{}-{}'.format(1, 0), img2=images[2],\r\n name2='{}-{}'.format(0, 3), feature_detector_name='surf',\r\n feature_descriptor_name='freak',order = order)\r\n order = order + 1\r\n match_and_draw_key_points(img1=images[5], name1='{}-{}'.format(1, 0), img2=images[12],\r\n name2='{}-{}'.format(3, 3), feature_detector_name='surf',\r\n feature_descriptor_name='freak',order = order)\r\n order = order + 1\r\n match_and_draw_key_points(img1=images[10], name1='{}-{}'.format(2, 0), img2=images[2],\r\n name2='{}-{}'.format(0, 3), feature_detector_name='surf',\r\n feature_descriptor_name='freak',order = order)\r\n order = order + 1\r\n match_and_draw_key_points(img1=images[10], name1='{}-{}'.format(2, 0), img2=images[8],\r\n name2='{}-{}'.format(1, 4), feature_detector_name='surf',\r\n feature_descriptor_name='freak',order = order)\r\n\r\n plt.show()\r\n\r\n","repo_name":"farhad-dalirani/AUT_Computer_Vision","sub_path":"96131125_HW05/problem-2.py","file_name":"problem-2.py","file_ext":"py","file_size_in_byte":7644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"7338862313","text":"import sys\nimport cv2\nimport numpy as np\n\ndef nothing(x):\n\tpass\n\ncolor_space = 'COLOR SPACE - HSV'\ncv2.namedWindow(color_space)\n\nmaxh = 'Max(H)'\nminh = 'Min(H)'\n\nmaxs = 'Max(S)'\nmins = 'Min(S)'\n\nmaxv = 'Max(V)'\nminv = 'Min(V)'\n\ncv2.createTrackbar(minh, color_space,0,179,nothing)\ncv2.createTrackbar(maxh, color_space,0,179,nothing)\ncv2.createTrackbar(mins, color_space,0,255,nothing)\ncv2.createTrackbar(maxs, color_space,0,255,nothing)\ncv2.createTrackbar(minv, color_space,0,255,nothing)\ncv2.createTrackbar(maxv, color_space,0,255,nothing)\n\n\npath = \"\"\nif len(sys.argv) > 1:\n\tpath = sys.argv[1]\n\n# Read image from path\nimage = cv2.imread(path)\n\n# Change color space\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n# Shape of image\nh,w,_ = image.shape\nfixed_size = 300\n\n# Resize image to adjust the screen\nif h > fixed_size:\n\trfactor = fixed_size / h;\n\timage = cv2.resize(image, (round(w*rfactor), round(h*rfactor)))\nelif w > fixed_size:\n\trfactor = fixed_size / w;\n\timage = cv2.resize(image, (round(w*rfactor), round(h*rfactor)))\n\ncv2.imshow('Original', cv2.cvtColor(image, cv2.COLOR_HSV2BGR))\nwhile(True):\n\n\tpos_minh = cv2.getTrackbarPos(minh, color_space)\n\tpos_maxh = cv2.getTrackbarPos(maxh, color_space)\n\tpos_mins = cv2.getTrackbarPos(mins, color_space)\n\tpos_maxs = cv2.getTrackbarPos(maxs, color_space)\n\tpos_minv = cv2.getTrackbarPos(minv, color_space)\n\tpos_maxv = cv2.getTrackbarPos(maxv, color_space)\n\n\tresult = cv2.inRange(image, (pos_minh, pos_mins, pos_minv), (pos_maxh, pos_maxs, pos_maxv))\n\tresult = cv2.bitwise_and(image, image, mask=result)\n\tcv2.imshow(color_space, result)\n\tk = cv2.waitKey(1) & 0xFF\n\tif k == ord('m'):\n\t\tmode = not mode\n\telif k == 27:\n\t\tbreak\n\ncv2.destroyAllWindows()\n","repo_name":"vinihcampos/digiproc","sub_path":"Exercise#5/color_space.py","file_name":"color_space.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"26508178984","text":"import math\nimport time\n\n\ndef task():\n chef = [2,2,5]\n customer = [4,5,6,30,123456789012]\n\n lcm_C = math.lcm(*chef)\n fx = []\n for i in range(lcm_C // min(chef)):\n for ti in chef:\n nti = i * ti\n if (nti < lcm_C):\n fx.append(nti)\n fx.sort()\n # print(lcm_C)\n # print(fx)\n len_fx = len(fx)\n for ci in customer:\n k = (ci-1) // len_fx\n x = (ci-1) % len_fx\n tt = k*lcm_C+fx[x]\n # print(f'{ci}: k={k} x={x} ans={tt}')\n\nstart = time.time_ns()\n\nfor _ in range(100000):\n task()\n\nend = time.time_ns()\n\nprint(end)\nprint(start)\nprint((end-start) / 1e5)","repo_name":"vorasilp/py_workspace","sub_path":"restaurant/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"71036488535","text":"from multiprocessing import Pool\nfrom itertools import product\nimport math\n\n_DATA_FILE = \"2021/data/22_reactor_reboot.txt\"\n_COMMAND_DELIMITER = ' '\n_ON_COMMAND = \"on\"\n_COORD_DELIMITER = ','\n_COORD_OFFSET = 2\n_RANGE_DELIMITER = '..'\n_GRID_MIN = -50\n_GRID_MAX = 50\n_CELL_SIZE = 10000\n_POOL_SIZE = 16\n\nclass BoundingBox:\n def __init__(self, intervals: list[range]) -> None:\n self.intervals = intervals\n\n def intersection(self, other: \"BoundingBox\") -> list[range]:\n if (self and other):\n return BoundingBox([\n range(max(s[0], o[0]), min(s[-1], o[-1])+1)\n for (s, o) in zip(self.intervals, other.intervals)\n ])\n \n return BoundingBox(range(0) for i in range(3))\n\n def __bool__(self) -> bool:\n return all(self.intervals)\n\n def contains(self, point: list[int]) -> bool:\n return all([interval.__contains__(coordinate)\n for (interval, coordinate) in zip(self.intervals, point)])\n\n def __repr__(self) -> str:\n return \"x={:s}\\ty={:s}\\tz={:s}\".format(\n str(self.intervals[0]), str(self.intervals[1]), str(self.intervals[2]))\n def __format__(self, __format_spec: str) -> str:\n return self.__repr__()\n\nclass Command:\n def __init__(self, line: str) -> None:\n (command_string, coords) = tuple(line.split(_COMMAND_DELIMITER))\n (x_coord, y_coord, z_coord) = tuple(coords.split(_COORD_DELIMITER))\n (x_low, x_high) = tuple(x_coord[_COORD_OFFSET:].split(_RANGE_DELIMITER))\n (y_low, y_high) = tuple(y_coord[_COORD_OFFSET:].split(_RANGE_DELIMITER))\n (z_low, z_high) = tuple(z_coord[_COORD_OFFSET:].split(_RANGE_DELIMITER))\n\n self.on = (command_string == _ON_COMMAND)\n self.bbox = BoundingBox([\n range(int(x_low), int(x_high) + 1),\n range(int(y_low), int(y_high) + 1),\n range(int(z_low), int(z_high) + 1),\n ])\n\n def __repr__(self) -> str:\n return \"{:s}\\t{:s}\".format(\"ON\" if self.on else \"off\", self.bbox)\n\nclass CellArgs:\n def __init__(self, commands: list[Command], cell_min: list[int], cell_max: list[int]) -> None:\n self.commands = commands\n self.cell_min = cell_min\n self.cell_max = cell_max\n\ndef process_cell(args: CellArgs) -> int:\n commands = args.commands\n cell_min = args.cell_min\n cell_max = args.cell_max\n\n print(\"Processing cell\", str(cell_min), str(cell_max))\n\n bounds = [set() for i in range(3)]\n cell = BoundingBox([range(min, max) for (min, max) in zip(cell_min, cell_max)])\n for command in commands:\n # print(command)\n constrained_bbox = command.bbox.intersection(cell)\n if (constrained_bbox):\n for (bound, interval) in zip(bounds, constrained_bbox.intervals):\n bound.add(interval.start)\n bound.add(interval.stop)\n\n x_bounds = sorted(bounds[0])\n y_bounds = sorted(bounds[1])\n z_bounds = sorted(bounds[2])\n\n total = 0\n for i in range(len(x_bounds) - 1):\n for j in range(len(y_bounds) - 1):\n for k in range(len(z_bounds) - 1):\n (x, y, z) = (x_bounds[i], y_bounds[j], z_bounds[k])\n volume = (x_bounds[i + 1] - x) * (y_bounds[j + 1] - y) * (z_bounds[k + 1] - z)\n # print(\"Volume of {:d},{:d},{:d}\\tto {:d},{:d},{:d}\\tis {:d}\".format(\n # x,y,z,x_bounds[i+1],y_bounds[j+1],z_bounds[k+1],volume\n # ))\n for command in reversed(commands):\n if (command.bbox.contains([x,y,z])):\n if (command.on):\n total += volume\n break\n\n return total\n\nif __name__ == '__main__':\n with open(_DATA_FILE, \"r\") as input:\n lines = input.readlines()\n\n commands = [Command(line.strip()) for line in lines]\n # print(process_cell(CellArgs(commands, _GRID_MIN, _GRID_MAX + 1)))\n\n grid_min = [math.inf]*3\n grid_max = 
[-math.inf]*3\n\n for command in commands:\n for i in range(3):\n grid_min[i] = min(grid_min[i], command.bbox.intervals[i].start)\n grid_max[i] = max(grid_max[i], command.bbox.intervals[i].stop)\n \n print(grid_min)\n print(grid_max)\n\n cells = []\n for cell_x in range(grid_min[0], grid_max[0], _CELL_SIZE):\n for cell_y in range(grid_min[1], grid_max[1], _CELL_SIZE):\n for cell_z in range(grid_min[2], grid_max[2], _CELL_SIZE):\n cell_min = [cell_x, cell_y, cell_z]\n cell_max = [cell_x + _CELL_SIZE, cell_y + _CELL_SIZE, cell_z + _CELL_SIZE]\n print(\"Creating cell\", cell_min, cell_max)\n cells.append(CellArgs(commands, cell_min, cell_max))\n\n with Pool(_POOL_SIZE) as p:\n print(sum(p.map(process_cell, cells)))","repo_name":"iandimayuga/adventofcode","sub_path":"2021/22_reactor_reboot.py","file_name":"22_reactor_reboot.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"21757757410","text":"import re\nimport urllib.request\ndef get_content(url):\n html=urllib.request.urlopen(url)\n content=html.read().decode()\n html.close()\n return content\n\ndef get_images(info):\n regex= 'img class=\"BDE_Image\" pic_type=\"0\" width=\".+?\" height=\".+?\" src=\"(.+?\\.jpg)\"'\n pat=re.compile(regex)\n images_code=re.findall(pat,info)\n # return images_code\n i=0\n for images_url in images_code:\n print(images_url)\n urllib.request.urlretrieve(images_url,'F:\\\\%s.jpg'%i)\n i+=1\n\ninfo=get_content(\"http://tieba.baidu.com/p/5096144918?red_tag=j0230121706\")\nget_images(info)\n","repo_name":"jiayunyan/basic-Python","sub_path":"urllib/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"71921946132","text":"import unittest\n\nimport jax\nimport jax.numpy as jnp\nimport numpy as onp\nimport parameterized\n\nfrom fmmax import basis, vector\n\n# Enable 64-bit precision for higher accuracy.\njax.config.update(\"jax_enable_x64\", True)\n\n\nclass ChangeBasisTest(unittest.TestCase):\n def test_basis_cycle(self):\n x = jnp.array([0.2, 0.8])\n y = jnp.array([-0.1, 0.3])\n u = jnp.array([-0.1, -0.7])\n v = jnp.array([0.0, 1.2])\n tu = jax.random.uniform(jax.random.PRNGKey(0), (10, 12))\n tv = jax.random.uniform(jax.random.PRNGKey(1), (10, 12))\n tx, ty = vector.change_vector_field_basis(tu, tv, u, v, x, y)\n tu_recovered, tv_recovered = vector.change_vector_field_basis(\n tx, ty, x, y, u, v\n )\n onp.testing.assert_allclose(tu_recovered, tu, rtol=1e-4)\n onp.testing.assert_allclose(tv_recovered, tv, rtol=1e-4)\n\n\nclass NormalizeTest(unittest.TestCase):\n @parameterized.parameterized.expand(\n [\n (\n vector.normalize_normal,\n [[1 / jnp.sqrt(2), 1.0, 0.0, 1.0, 0.0]],\n [[1 / jnp.sqrt(2), 0.0, 1.0, 0.0, 0.0]],\n ),\n (\n vector.normalize_pol,\n [[1 / jnp.sqrt(2), 0.2 / jnp.sqrt(2), 0.0, 0.01 / jnp.sqrt(2), 0.0]],\n [[1 / jnp.sqrt(2), 0.0, 0.2 / jnp.sqrt(2), 0.0, 0.0]],\n ),\n (\n vector.normalize_jones,\n [[0.5 + 0.5j, 0.734, 0.680, 0.707, 0.707]],\n [[0.5 + 0.5j, 0.680j, 0.734j, 0.707j, 0.707j]],\n ),\n ]\n )\n def test_normalized_matches_expected(self, normalize_fn, expected_tx, expected_ty):\n tx = jnp.asarray([[1.0, 0.2, 0.0, 0.01, 0.0]], dtype=float)\n ty = jnp.asarray([[1.0, 0.0, 0.2, 0.0, 0.0]], dtype=float)\n tx, ty = normalize_fn(tx, ty)\n onp.testing.assert_allclose(tx, jnp.asarray(expected_tx), rtol=1e-3)\n onp.testing.assert_allclose(ty, jnp.asarray(expected_ty), rtol=1e-3)\n\n @parameterized.parameterized.expand(\n [(vector.normalize_normal,), (vector.normalize_pol,), (vector.normalize_jones,)]\n )\n def test_zeros_no_nan(self, normalize_fn):\n tx = jnp.zeros((20, 20))\n ty = jnp.zeros((20, 20))\n tx, ty = normalize_fn(tx, ty)\n self.assertFalse(onp.any(onp.isnan(tx)))\n self.assertFalse(onp.any(onp.isnan(ty)))\n\n @parameterized.parameterized.expand(\n [(vector.normalize_normal,), (vector.normalize_pol,), (vector.normalize_jones,)]\n )\n def test_gradient_no_nan(self, normalize_fn):\n def loss_fn(tx, ty):\n tx, ty = normalize_fn(tx, ty)\n return jnp.real(jnp.sum(tx) + jnp.sum(ty))\n\n gx, gy = jax.grad(loss_fn, argnums=(0, 1))(jnp.zeros((5, 5)), jnp.zeros((5, 5)))\n self.assertFalse(onp.any(onp.isnan(gx)))\n self.assertFalse(onp.any(onp.isnan(gy)))\n\n\n# -----------------------------------------------------------------------------\n# Tests related to the `tangent_field` function.\n# -----------------------------------------------------------------------------\n\n\nclass TangentFieldTest(unittest.TestCase):\n def test_optimize(self):\n arr = jnp.array(\n [[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], dtype=jnp.float32\n )\n tx, ty = vector.tangent_field(\n arr,\n use_jones=False,\n optimizer=vector.OPTIMIZER,\n alignment_weight=vector.ALIGNMENT_WEIGHT,\n smoothness_weight=vector.SMOOTHNESS_WEIGHT,\n steps_dim_multiple=vector.STEPS_DIM_MULTIPLE,\n smoothing_kernel=jnp.ones((1, 1)),\n )\n expected_tx = [\n [\n 0.083,\n 0.25,\n 0.417,\n 0.583,\n 0.75,\n 0.667,\n 0.333,\n 0.0,\n -0.333,\n -0.667,\n -0.75,\n -0.583,\n -0.417,\n -0.25,\n -0.083,\n ]\n ]\n onp.testing.assert_allclose(tx, expected_tx, rtol=0.02)\n onp.testing.assert_allclose(ty, 0.0, atol=1e-7)\n\n def test_optimize_jones(self):\n arr = jnp.array(\n [[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 
0]], dtype=jnp.float32\n )\n tx, ty = vector.tangent_field(\n arr,\n use_jones=True,\n optimizer=vector.OPTIMIZER,\n alignment_weight=vector.ALIGNMENT_WEIGHT,\n smoothness_weight=vector.SMOOTHNESS_WEIGHT,\n steps_dim_multiple=vector.STEPS_DIM_MULTIPLE,\n smoothing_kernel=jnp.ones((1, 1)),\n )\n expected_tx_magnitude = jnp.ones_like(arr)\n onp.testing.assert_allclose(jnp.abs(tx), expected_tx_magnitude)\n onp.testing.assert_allclose(ty, 0.0, atol=1e-7)\n\n\nclass SchemesTest(unittest.TestCase):\n @parameterized.parameterized.expand(\n [(vector.JONES_DIRECT,), (vector.JONES,), (vector.POL,), (vector.NORMAL,)]\n )\n def test_batch_matches_single_exact(self, scheme):\n key = jax.random.PRNGKey(0)\n arr = jax.random.uniform(key, shape=(5, 10, 10))\n primitive_lattice_vectors = basis.LatticeVectors(u=basis.X, v=basis.Y)\n expansion = basis.generate_expansion(\n primitive_lattice_vectors=primitive_lattice_vectors,\n approximate_num_terms=10,\n truncation=basis.Truncation.CIRCULAR,\n )\n tx, ty = vector.VECTOR_FIELD_SCHEMES[scheme](\n arr=arr,\n expansion=expansion,\n primitive_lattice_vectors=primitive_lattice_vectors,\n )\n for i in range(5):\n expected_tx_i, expected_ty_i = vector.VECTOR_FIELD_SCHEMES[scheme](\n arr=arr[i, :, :],\n expansion=expansion,\n primitive_lattice_vectors=primitive_lattice_vectors,\n )\n onp.testing.assert_allclose(tx[i, :, :], expected_tx_i)\n onp.testing.assert_allclose(ty[i, :, :], expected_ty_i)\n\n @parameterized.parameterized.expand(\n [(scheme,) for scheme in vector.VECTOR_FIELD_SCHEMES]\n )\n def test_uniform_array_no_nan(self, scheme):\n # The tangent field calculation requires special logic to handle uniform arrays,\n # otherwise `nan` will show up in the result.\n arr = jnp.ones((10, 10))\n primitive_lattice_vectors = basis.LatticeVectors(u=basis.X, v=basis.Y)\n expansion = basis.generate_expansion(\n primitive_lattice_vectors=primitive_lattice_vectors,\n approximate_num_terms=10,\n truncation=basis.Truncation.CIRCULAR,\n )\n tx, ty = vector.VECTOR_FIELD_SCHEMES[scheme](\n arr=arr,\n expansion=expansion,\n primitive_lattice_vectors=primitive_lattice_vectors,\n )\n self.assertFalse(onp.any(onp.isnan(tx)))\n self.assertFalse(onp.any(onp.isnan(ty)))\n\n @parameterized.parameterized.expand(\n [(scheme,) for scheme in vector.VECTOR_FIELD_SCHEMES]\n )\n def test_gradient_no_nan(self, scheme):\n primitive_lattice_vectors = basis.LatticeVectors(u=basis.X, v=basis.Y)\n expansion = basis.generate_expansion(\n primitive_lattice_vectors=primitive_lattice_vectors,\n approximate_num_terms=10,\n truncation=basis.Truncation.CIRCULAR,\n )\n\n def _loss_fn(arr):\n tx, ty = vector.VECTOR_FIELD_SCHEMES[scheme](\n arr=arr,\n expansion=expansion,\n primitive_lattice_vectors=basis.LatticeVectors(u=basis.X, v=basis.Y),\n )\n return jnp.sum(jnp.abs(tx) ** 2 + jnp.abs(ty) ** 2)\n\n arr = jnp.asarray(\n [\n [1.0 + 1.0j, 1.0, 1.0, 1.0, 1.0 + 1.1j, 1.0 + 1.1j],\n [1.0 + 1.0j, 0.0, 0.0, 0.0, 1.0 + 1.1j, 1.0 + 1.1j],\n [1.0 + 1.0j, 0.0, 0.0, 0.0, 1.0 + 1.1j, 1.0 + 1.1j],\n [1.0 + 1.0j, 1.0, 1.0, 1.0, 1.0 + 1.1j, 1.0 + 1.1j],\n ]\n )\n grad = jax.grad(_loss_fn)(arr)\n self.assertFalse(onp.any(onp.isnan(grad)))\n\n\nclass LossTest(unittest.TestCase):\n @parameterized.parameterized.expand(\n [\n (1.0, 0.0, 1.0, 0.0, -1.0),\n (-1.0, 0.0, 1.0, 0.0, -1.0),\n (0.0, 1.0, 1.0, 0.0, 0.0),\n ]\n )\n def test_self_alignment_loss(self, tx, ty, tx0, ty0, expected):\n tx = jnp.asarray(tx)[jnp.newaxis, jnp.newaxis]\n ty = jnp.asarray(ty)[jnp.newaxis, jnp.newaxis]\n tx0 = jnp.asarray(tx0)[jnp.newaxis, 
jnp.newaxis]\n ty0 = jnp.asarray(ty0)[jnp.newaxis, jnp.newaxis]\n loss = vector._self_alignment_loss(tx, ty, tx0, ty0)\n onp.testing.assert_allclose(loss, expected)\n\n @parameterized.parameterized.expand(\n [\n (\n jnp.ones((2, 2)),\n jnp.zeros((2, 2)),\n jnp.ones((2, 2)),\n jnp.zeros((2, 2)),\n -400,\n ),\n (\n jnp.asarray([[1, 1], [-1, -1], [-1, -1], [1, 1]]),\n jnp.zeros((4, 2)),\n jnp.ones((4, 2)),\n jnp.zeros((4, 2)),\n -768.0,\n ),\n ]\n )\n def test_field_loss(self, tx, ty, tx0, ty0, expected):\n loss = vector._field_loss(\n tx, ty, tx0, ty0, alignment_weight=100, smoothness_weight=2\n )\n onp.testing.assert_allclose(loss, expected)\n\n def test_field_loss_batch_matches_single(self):\n key = jax.random.PRNGKey(0)\n tx, ty, tx0, ty0 = jax.random.uniform(key, (4, 8, 5, 10))\n loss = vector._field_loss(\n tx, ty, tx0, ty0, alignment_weight=100, smoothness_weight=2\n )\n expected_loss = 0\n for tx_slice, ty_slice, tx0_slice, ty0_slice in zip(tx, ty, tx0, ty0):\n expected_loss += vector._field_loss(\n tx_slice,\n ty_slice,\n tx0_slice,\n ty0_slice,\n alignment_weight=100,\n smoothness_weight=2,\n )\n onp.testing.assert_allclose(loss, expected_loss, rtol=1e-6)\n\n\nclass UtilitiesTest(unittest.TestCase):\n @parameterized.parameterized.expand(\n [\n ((1.0, 1.0), 1.0, (jnp.sqrt(0.5), jnp.sqrt(0.5))),\n ((0.1, 0.1), 1.0, (0.1, 0.1)),\n ((2.0, 0.0), 1.0, (1.0, 0.0)),\n ((2.0, 0.0), 1.5, (1.5, 0.0)),\n ((2.0j, 0.0), 1.5, (1.5j, 0.0)),\n ]\n )\n def test_clip_magnitude(self, tx_ty, max_magnitude, expected):\n result = vector._clip_magnitude(*tx_ty, max_magnitude)\n onp.testing.assert_allclose(result, expected)\n\n @parameterized.parameterized.expand(\n [\n ((1.0, 0.0), (1.0, 0.0)),\n ((1.0j, 0.0), (1.0, 0.0)),\n ((1.0j, 1.0), (jnp.sqrt(0.5) * (1.0 + 1.0j), jnp.sqrt(0.5) * (1.0 - 1.0j))),\n ]\n )\n def test_remove_average_phase(self, tx_ty, expected):\n tx, ty = tx_ty\n tx = jnp.asarray(tx)[jnp.newaxis, jnp.newaxis]\n ty = jnp.asarray(ty)[jnp.newaxis, jnp.newaxis]\n result_tx, result_ty = vector._remove_mean_phase(tx, ty)\n onp.testing.assert_allclose(\n (result_tx.squeeze(), result_ty.squeeze()), expected\n )\n","repo_name":"facebookresearch/fmmax","sub_path":"tests/fmmax/test_vector.py","file_name":"test_vector.py","file_ext":"py","file_size_in_byte":11276,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"67"}
+{"seq_id":"33894604182","text":"import numpy as np\nimport torch\nimport pyspiel\n\ngame = pyspiel.load_game(\"quoridor(ansi_color_output=true,board_size=3,wall_count=3)\")\nstate = game.new_initial_state()\n\nwins, draws, loses = (0, 0, 0)\nfor i in range(200):\n state = game.new_initial_state()\n while not state.is_terminal():\n action = np.random.choice(state.legal_actions())\n state.apply_action(action)\n #print(str(state) + '\\n')\n rewards = state.rewards()\n if (rewards[1] == 1): wins += 1\n if (rewards[1] == 0): draws += 1\n if (rewards[1] == -1): loses += 1\nprint(wins, draws, loses)","repo_name":"Mijova/QuoridorAI","sub_path":"env_test.py","file_name":"env_test.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"40948744509","text":"#!/usr/bin/env python\nfrom datetime import datetime\nimport sys\n\nfrom file_read_backwards import FileReadBackwards\n\nlog_file = '/var/log/borg.log'\n\nwith FileReadBackwards(log_file, encoding='utf-8') as f:\n\n date_str = \"\"\n\n while True:\n line = f.readline()\n if not line:\n break\n if 'Backup created.' in line:\n date_str = line.split('-')[0].strip()\n break\n\ndate = datetime.strptime(date_str, \"%a %d %b %Y %I:%M:%S %p %Z\")\ndelta = datetime.now() - date\n\ndelta_seconds = delta.days*86399 + delta.seconds\n\nif delta_seconds < 0:\n seconds = round(delta_seconds, 2)\n print(f\"[{seconds} sec]\")\nelif delta_seconds < 3600:\n minutes = round(delta_seconds / 60, 2)\n print(f\"[{minutes} min]\")\nelif delta_seconds < 86400:\n hours = round(delta_seconds / 3600, 2)\n print(f\"[{hours} hrs]\")\nelse:\n days = round(delta_seconds / 86400)\n print(f\"[{days} days]\")\n","repo_name":"seantur/.dotfiles","sub_path":"polybar/.config/polybar/getbackuptime.py","file_name":"getbackuptime.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"933017503","text":"# dict={'log_id': 5891599090191187877, 'result_num': 1, 'result': [{'probability': 0.9882395267486572, 'top': 205, 'height': 216, 'classname': 'Face', 'width': 191, 'left': 210}]}\n#\n# # 访问dict下的result列表的值:\n# print(dict['result'][0]['top'] )\n# # dict下的result列表的第一个值(字典)的top内容\n#\n# # 也可以使用临时变量:\n# dict1=dict['result']\n# print(dict1[0]['probability'])\n\n\nc = {\n \"forchange\":\n [\n {\"name\": \"backer\", \"age\": 5},\n {\"name\": \"willie\", \"age\": 18},\n {\"name\": \"penny\", \"age\": 20},\n ]\n\n }\n\nprint(c[\"forchange\"][0][\"name\"])\n","repo_name":"aaneack/Python_Learning","sub_path":"Python/参考模版/列表-字典-多层嵌套.py","file_name":"列表-字典-多层嵌套.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"2976812105","text":"import io\nimport random\nimport string\nimport pathlib\nfrom io import StringIO\nfrom word_vectors import FileType\nfrom word_vectors.utils import is_binary, find_space, bookmark, to_vocab, create_output_path\nfrom utils import DATA, GLOVE, W2V, W2V_TEXT, LEADER, rand_str\n\n\ndef test_enum_parse():\n gold_mapping = {\n \"glove\": FileType.GLOVE,\n \"GloVe\": FileType.GLOVE,\n \"w2v_text\": FileType.W2V_TEXT,\n \"w2v-text\": FileType.W2V_TEXT,\n \"w2v\": FileType.W2V,\n \"leader\": FileType.LEADER,\n \"fasttext\": FileType.FASTTEXT,\n \"fast-text\": FileType.FASTTEXT,\n \"fast_text\": FileType.FASTTEXT,\n \"numberbatch\": FileType.NUMBERBATCH,\n }\n for s, t in gold_mapping.items():\n assert FileType.from_string(s) is t\n\n\ndef test_is_binary():\n file_to_gold = {DATA / GLOVE: False, DATA / W2V: True, DATA / W2V_TEXT: False, DATA / LEADER: True}\n for file_name, gold in file_to_gold.items():\n assert is_binary(file_name) == gold\n\n\ndef test_find_space():\n gold = rand_str()\n gold_offset = len(gold.encode(\"utf-8\")) + 1\n extra = rand_str()\n text = f\"{gold} {extra}\".encode(\"utf-8\")\n word, offset = find_space(text, offset=0)\n assert word == gold\n assert offset == gold_offset\n\n\ndef test_find_space_with_offset():\n before = rand_str()\n start = len(before.encode(\"utf-8\")) + 1\n gold = rand_str()\n gold_offset = start + len(gold.encode(\"utf-8\")) + 1\n after = rand_str()\n text = f\"{before} {gold} {after}\".encode(\"utf-8\")\n word, offset = find_space(text, offset=start)\n\n\ndef test_bookmark():\n data = StringIO(\"bad\\ngood\\nbad\")\n _ = data.readline()\n with bookmark(data):\n line = data.readline()\n next_line = data.readline()\n assert next_line == line\n\n\ndef test_to_vocab():\n vocab = list(\"ABCDEFGHIJKLMNOP\")\n random.shuffle(vocab)\n d = {k: i for i, k in enumerate(vocab)}\n assert to_vocab(vocab) == d\n\n\ndef test_create_output_path():\n ext = rand_str()\n base = rand_str()\n file_type = random.choice(list(FileType))\n path = f\"{ext}.{base}\"\n gold = f\"{ext}.{file_type}\"\n assert create_output_path(path, file_type) == gold\n\n\ndef test_create_output_path_pathlib():\n ext = rand_str()\n base = rand_str()\n file_type = random.choice(list(FileType))\n path = pathlib.Path(f\"{ext}.{base}\")\n gold = f\"{ext}.{file_type}\"\n assert create_output_path(path, file_type) == gold\n\n\ndef test_create_output_path_open():\n ext = rand_str()\n base = rand_str()\n file_type = random.choice(list(FileType))\n path = f\"{ext}.{base}\"\n path_file = io.StringIO(path)\n path_file.name = path\n gold = f\"{ext}.{file_type}\"\n assert create_output_path(path_file, file_type) == gold\n\n\ndef test_create_output_path_open_bytes():\n ext = rand_str()\n base = rand_str()\n file_type = random.choice(list(FileType))\n path = f\"{ext}.{base}\"\n path_file = io.BytesIO(path.encode(\"utf-8\"))\n path_file.name = path\n gold = f\"{ext}.{file_type}\"\n assert create_output_path(path_file, file_type) == gold\n","repo_name":"blester125/word-vectors","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
+{"seq_id":"19802503866","text":"#1. Create a greeting for your program.\nprint(\"Sawadee Krup!\")\n#2. Ask the user for the city that they grew up in.\nprovince = input(\"What province are you from?\\n\")\n#3. Ask for their favourite verb.\nverb = input(\"What's your favourite verb?\\n\")\n#4. Combine the name of their city and verb and show them their Football Team name.\nfootball_club_name = province + \" \" + verb + \" \" + \"Football Club\"\n#5. Make sure the input cursor shows on a new line:\nprint(\"Your Football Club name is \" + football_club_name)\n\n","repo_name":"BandsThoBaby/FCNameGenerator","sub_path":"FC_name_generator.py","file_name":"FC_name_generator.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"27977236405","text":"import numpy as np\nfrom ..util.errors import NumericalPrecisionError\nfrom ..snnls.giga import GIGA\nfrom .coreset import Coreset\n\nclass HilbertCoreset(Coreset):\n def __init__(self, data, projector, n_subsample=None, snnls=GIGA, **kw):\n self.data = data\n self.projector = projector\n self.snnls_class = snnls\n self.snnls = None\n super().__init__(**kw)\n\n def reset(self):\n if self.snnls is not None:\n self.snnls.reset()\n super().reset()\n\n def _build_projector(self, size):\n cts = []\n ct_idcs = []\n for i in range(size):\n f = np.random.randint(self.data.shape[0])\n if f in ct_idcs:\n cts[ct_idcs.index(f)] += 1\n else:\n ct_idcs.append(f)\n cts.append(1)\n wts = self.data.shape[0] * np.array(cts) / np.array(cts).sum()\n idcs = np.array(ct_idcs)\n self.projector.update(wts, self.data[idcs,:])\n\n def _build(self, size):\n\n # build a projector using a uniformly random coreset\n self._build_projector(size)\n\n # project the data log likelihoods\n vecs = self.projector.project(self.data)\n\n # construct the snnls object \n self.snnls = self.snnls_class(vecs.T, vecs.sum(axis=0))\n\n # build the coreset\n self.snnls.build(size)\n\n # extract the results from the snnls object\n w = self.snnls.weights()\n self.wts = w[w>0]\n self.idcs = np.where(w>0)[0]\n self.pts = self.data[self.idcs]\n\n def _optimize(self):\n self.snnls.optimize()\n w = self.snnls.weights()\n self.wts = w[w>0]\n self.idcs = self.sub_idcs[w>0]\n self.pts = self.data[self.idcs]\n\n def error(self):\n return self.snnls.error()\n","repo_name":"trevorcampbell/quasi-newton-coresets-experiments","sub_path":"bayesiancoresets/coreset/hilbert.py","file_name":"hilbert.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
+{"seq_id":"30761970132","text":"import datetime\n\n\ndef question4():\n\n date = input(\"Date(MM/DD/YYYY): \")\n\n month = int(date[0:2])\n day = int(date[3:5])\n year = int(date[6:10])\n\n printdate = datetime.datetime(year, month, day)\n print(printdate.strftime(\"%A\")+\", \"+printdate.strftime(\"%B\")+\" \"+str(day)+\", \"+printdate.strftime(\"%Y\"))\n\n\nquestion4()","repo_name":"emillyly/CS-Learning","sub_path":"ProgrammingLanguages/hw1/p4.py","file_name":"p4.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"70816068694","text":"\"\"\"\nA linked list is given such that each node contains an additional random pointer which could point to any node in the list or null.\n\nReturn a deep copy of the list.\n\n\nExample\nChallenge \nCould you solve it with O(1) space?\n\"\"\"\n\n\"\"\"\nDefinition for singly-linked list with a random pointer.\nclass RandomListNode:\n def __init__(self, x):\n self.label = x\n self.next = None\n self.random = None\n\"\"\"\n\n\nclass Solution:\n # @param head: A RandomListNode\n # @return: A RandomListNode\n def copyRandomList(self, head):\n # write your code here\n dic = {}\n tmp = head\n while tmp:\n dic[tmp] = RandomListNode(tmp.label)\n tmp = tmp.next\n tmp = head\n while head:\n if head.next:\n dic[head].next = dic[head.next]\n if head.random:\n dic[head].random = dic[head.random]\n head = head.next\n return dic[tmp]\n\n\"\"\"\nDefinition for singly-linked list with a random pointer.\nclass RandomListNode:\n def __init__(self, x):\n self.label = x\n self.next = None\n self.random = None\n\"\"\"\n\n\nclass Solution:\n # @param head: A RandomListNode\n # @return: A RandomListNode\n def copyRandomList(self, head):\n # write your code here\n if not head:\n return head\n self.copy_next(head)\n self.copy_random(head)\n return self.split_list(head)\n \n \n \n def copy_next(self, head):\n while head:\n node = RandomListNode(head.label)\n node.next = head.next\n head.next = node\n head = head.next.next\n \n \n def copy_random(self, head):\n while head:\n if head.random:\n head.next.random = head.random.next\n head = head.next.next\n \n \n def split_list(self, head):\n new_head = head.next\n while head:\n tmp = head.next\n head.next = tmp.next\n head = tmp.next\n if head:\n tmp.next = head.next\n \n return new_head\n \n \n \n \n","repo_name":"daishengliang/coding-challenge","sub_path":"high-frequent/Amazon OA High Frequent 9 Problems/Copy List with Random Pointer.py","file_name":"Copy List with Random Pointer.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"38709671901","text":"#!/usr/bin/python\n\nfrom flask import Flask\nfrom flask_restful import reqparse, abort, Api, Resource\nimport pickle\nimport numpy as np\nimport spacy\n\napp = Flask(__name__)\napi = Api(app)\n\n\nner_path = 'models/baseline'\nnlp = spacy.load(ner_path)\n\n\n# argument parsing\nparser = reqparse.RequestParser()\nparser.add_argument('query')\n\n\nclass ReturnEntities(Resource):\n def get(self):\n # use parser and find the user's query\n args = parser.parse_args()\n user_query = args['query']\n\n\n doc = nlp(user_query)\n\n output = {}\n for ent in doc.ents:\n output[ent.text] = ent.label_\n \n \n\n # create JSON object\n # output = {'entities': entity, 'text': text}\n \n return output\n\n\n# Setup the Api resource routing here\n# Route the URL to the resource\napi.add_resource(ReturnEntities, '/')\n\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"tonycolucci/MSIA414_FinalProject","sub_path":"src/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"19924960736","text":"import types\nfrom nltk.metrics.agreement import AnnotationTask\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import f1_score, precision_score, recall_score\nfrom parser import *\nfrom parse_project import parse_project, parse_corpus\nfrom functools import partial\nimport re\nimport util\nfrom pathlib import Path\nfrom itertools import groupby, product, combinations, chain\nfrom collections import Counter\nimport settings\nimport pandas as pd\nimport numpy as np\nimport numpy.testing as npt\n\n\ndef make_csv(data, opt_dirp=\"agreestat-sentiment-span-iaa-files\"):\n\n Path(opt_dirp).mkdir(parents=True, exist_ok=True)\n\n d = {}\n\n for anno_id, itm, label in data:\n d.setdefault(itm, []).append({anno_id: label})\n\n df = pd.Series(d).apply(\n lambda x: pd.Series({k: v for y in x for k, v in y.items()})\n )\n df.to_csv(Path(opt_dirp) / \"agreestat_interrater_data.csv\", index=False)\n\n\nclass CustomAnnotationTask(AnnotationTask):\n \"\"\"\n Wrapper object aorund nltk.agreement.AnnotationTask object that allows for frp metrics to be computed.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.label_encoder = LabelEncoder().fit(np.array(list(self.K)))\n self.sk_labels = self._get_scikit_labels()\n\n self.metrics = {\n \"Fleiss' kappa\": self.multi_kappa,\n \"Cohen's kappa\": self.kappa,\n \"Krippendorff's alpha\": self.alpha,\n \"Weighted kappa\": self.weighted_kappa,\n \"S-score\": self.S,\n \"Scott's pi (multi)\": self.pi,\n \"F1-score\": f1_score,\n \"Precision\": precision_score,\n \"Recall\": recall_score,\n # \"Accuracy\": accuracy_score\n }\n\n # set distance func\n if isinstance(\n self.distance, tuple\n ): # if string it should be a function of this obj\n func_name, dist_kwargs = self.distance[0], self.distance[1]\n self.distance = partial(getattr(self, func_name), **dist_kwargs)\n elif callable(self.distance): # else it should be a passed function\n pass\n else:\n raise ValueError(\n f'{self.distance} should be a tuple or a dict (\"func name of class method\", kwargs).'\n )\n\n def load_array(self, array):\n \"\"\"Load an sequence of annotation results, appending to any data already loaded.\n\n The argument is a sequence of 3-tuples, each representing a coder's labeling of an item:\n (coder,item,label)\n \"\"\"\n for coder, item, labels in array:\n if isinstance(self.distance, tuple) and \"windowed\" in self.distance[0]:\n labels = labels[0]\n self.C.add(coder)\n self.K.add(labels)\n self.I.add(item)\n self.data.append({\"coder\": coder, \"labels\": labels, \"item\": item})\n\n def compute_all(self, average=\"binary\"):\n all_results = {}\n for name, func in self.metrics.items():\n if isinstance(func, types.MethodType):\n all_results[name] = func()\n else:\n all_results[name] = self.scikit_metric_pairwise(func, average=average)\n\n return all_results\n\n def _get_scikit_labels(self):\n sk_labels = []\n key = lambda x: x[\"coder\"]\n data = self.data[:]\n data.sort(key=key)\n for item, item_data in groupby(data, key=key):\n labels_ann = self.label_encoder.transform(\n [\n idat[\"labels\"][0]\n if isinstance(idat[\"labels\"], tuple)\n else idat[\"labels\"]\n for idat in item_data\n ]\n )\n sk_labels.append(labels_ann)\n return sk_labels\n\n def scikit_metric_pairwise(self, func, **kwargs):\n total = []\n s = self.sk_labels[:]\n for lab1 in self.sk_labels:\n s.remove(lab1)\n for lab2 in s:\n total.append(func(lab1, lab2, **kwargs))\n ret = np.mean(total, axis=0)\n return ret\n\n\ndef 
dice_coefficient(a, b):\n \"\"\"\n :param a: list of items\n :param b: list of items\n :return: dice coefficient score\n \"\"\"\n\n a.sort()\n b.sort()\n if not len(a) or not len(b):\n return 0.0\n \"\"\" quick case for true duplicates \"\"\"\n if a == b:\n return 1.0\n\n # assignments to save function calls\n lena = len(a)\n lenb = len(b)\n\n # count match\n matches = len(set(a) & set(b))\n\n score = 2 * float(matches) / float(lena + lenb)\n return score\n\n\ndef compute_overlap_stats(proj):\n\n exact_overlaps = [find_exact_overlap(d.events) for d in proj.annotation_documents]\n exact_overlaps = [x for x in exact_overlaps if x]\n exact_overlaps = util.flatten(exact_overlaps)\n\n overlaps_cnt = len(exact_overlaps)\n overlap_events_cnt = sum(len(x) for x in exact_overlaps)\n all_ev = []\n for d in proj.annotation_documents:\n for ev in d.events:\n all_ev.append(ev)\n all_ev_cnt = len(all_ev)\n overlap_pct = round(100 * overlap_events_cnt / all_ev_cnt, 1)\n return {\n \"overlap_pct\": overlap_pct,\n \"overlaps_cnt\": overlaps_cnt,\n \"overlapping_event_cnt\": overlap_events_cnt,\n }\n\n\ndef score_dice_pairwise(annotations_x, annotations_y):\n\n pairwise_dice_scores = np.zeros(shape=(len(annotations_x), len(annotations_y)))\n\n # score all events with dice coefficient\n for i, x in enumerate(annotations_x):\n\n for j, y in enumerate(annotations_y):\n\n dice_score = dice_coefficient(x, y)\n\n pairwise_dice_scores[i, j] = dice_score\n\n return pairwise_dice_scores\n\n\ndef unit_test():\n\n # test pairwise dice scoring\n x_y_token_ids = [\n # ([[\"1\"]], [[\"1\"]], [[1.]]), # full match\n # ([[\"1\", \"2\"]], [[\"2\"]], [[0.66]]), # test scoring\n # ([[\"1\"], [\"2\"]], [[\"3\"], [\"4\"]], [[0., 0.], [0., 0.]]), # no match multiple\n # ([[\"1\"], [\"2\"]], [[\"2\"], [\"3\"], [\"4\"]], [[0., 0., 0.], [1., 0., 0.]]), # one match, multiple\n (\n [[\"1\", \"2\"], [\"2\"]],\n [[\"1\"], [\"2\"]],\n [[0.66, 0.66], [0.0, 1.0]],\n ), # x self-overlaps\" solution 0>0, 1>1 REQUIRES RESOLUTION\n (\n [[\"1\"], [\"2\"]],\n [[\"1\", \"2\"], [\"2\"]],\n [[0.66, 0.0], [0.66, 1.0]],\n ), # y self-overlaps\" solution 0>0, 1>1 REQUIRES RESOLUTION\n (\n [[\"1\"], [\"2\"]],\n [[\"1\", \"2\"], [\"2\", \"3\"]],\n [[0.66, 0.0], [0.66, 0.66]],\n ), # y-self overlaps, different boundaries: REQUIRES label check\n ]\n for x, y, correct in x_y_token_ids:\n dice_scores = score_dice_pairwise(x, y)\n dice_scores_orig = dice_scores.copy()\n npt.assert_almost_equal(\n np.array(correct), dice_scores, decimal=2\n ) # assert equal with decimal precision 2\n\n mask = dice_scores == dice_scores.max(axis=0, keepdims=True)\n dice_scores[~mask] = 0.0\n mask2 = dice_scores == dice_scores.max(axis=1, keepdims=True)\n dice_scores[~mask2] = 0.0\n pass\n pass\n\n\ndef flatten(l):\n return [item for sublist in l for item in sublist]\n\n\ndef merge_tuples(edges):\n\n from collections import defaultdict\n\n def dfs(adj_list, visited, vertex, result, key):\n visited.add(vertex)\n result[key].append(vertex)\n for neighbor in adj_list[vertex]:\n if neighbor not in visited:\n dfs(adj_list, visited, neighbor, result, key)\n\n adj_list = defaultdict(list)\n for x, y in edges:\n adj_list[x].append(y)\n adj_list[y].append(x)\n\n result = defaultdict(list)\n visited = set()\n for vertex in adj_list:\n if vertex not in visited:\n dfs(adj_list, visited, vertex, result, vertex)\n\n return list(result.values())\n\n\ndef resolve_self_overlap(df, category_name, extent):\n # issue assign group by overlap\n\n # 1. 
isolate\n\n # get self-overlap: anno_id has multiple same group_id\n self_overlap = df.duplicated(subset=[\"anno_id\", \"group_id\"], keep=False)\n if self_overlap.any():\n problem_groups = df[self_overlap].group_id.unique()\n df_resolve = df[df.group_id.isin(problem_groups)].sort_values(\"group_in_doc\")\n pass\n for group_id, group_df in df_resolve.groupby([\"group_in_doc\"]):\n # if in same overlap-group: split group by most overlap per anno\n new_groups = []\n for (anno_id1, anno_group1), (anno_id2, anno_group2) in combinations(\n group_df.groupby([\"anno_id\"]), 2\n ):\n pairwise_dice = pd.DataFrame(\n index=anno_group1.index, columns=anno_group2.index, dtype=float\n )\n for x in pairwise_dice.index: # pairwise matching\n for y in pairwise_dice.columns:\n x_token_ids = group_df.unit.loc[x].get_extent_token_ids(\n extent=[]\n )\n y_token_ids = group_df.unit.loc[y].get_extent_token_ids(\n extent=[]\n )\n dice_score = dice_coefficient(x_token_ids, y_token_ids)\n pairwise_dice.loc[x, y] = dice_score\n\n # select max dicescore as match, if multiple max -> need to look at label to disambiguate\n match_idc = (\n pairwise_dice[pairwise_dice == pairwise_dice.values.max()]\n .stack()\n .index.tolist()\n )\n\n if (\n len(match_idc) > 1\n and match_idc == pairwise_dice.stack().index.tolist()\n ): # multiple max -> need resolution of labels\n for (x, y) in match_idc:\n if df.loc[x][category_name] == df.loc[y][category_name]:\n new_groups.append((x, y))\n else:\n new_groups.extend(match_idc)\n\n new_groups = merge_tuples(new_groups)\n # exceptional case with split annotations after merge: same group [[1-2_anno_x]] [[3-4 anno_x]][[1-4 anno_y]]:\n # > create 2 groups\n newer_groups = []\n for newgroup in new_groups:\n newgroup_df = df.loc[newgroup].sort_values(\"anno_id\")\n split_overlap = newgroup_df.duplicated(subset=[\"anno_id\"], keep=False)\n if split_overlap.any():\n x_idc = split_overlap.index[split_overlap].tolist()\n y_idc = split_overlap.index[~split_overlap].tolist()\n # select the largest overlap\n mean_dice_coefs = []\n for x in x_idc:\n x_token_ids = newgroup_df.unit.loc[x].get_extent_token_ids(\n extent=extent\n )\n dice_scores = [\n dice_coefficient(\n x_token_ids,\n newgroup_df.unit.loc[y].get_extent_token_ids(\n extent=extent\n ),\n )\n for y in y_idc\n ]\n mean_dice_coefs.append(np.mean(dice_scores))\n keep = x_idc[np.argmax(mean_dice_coefs)]\n newgroup = [keep] + y_idc\n newer_groups.append(newgroup)\n\n # add every unmatched as new group\n for idx in group_df.index:\n if idx not in flatten(newer_groups):\n newer_groups.append([idx])\n\n # change group_id\n for i, g in enumerate(newer_groups):\n for idx in g:\n df.loc[idx, \"group_id\"] = df.loc[idx, \"group_id\"] + \"_\" + str(i + 1)\n return df\n else:\n return df\n\n\ndef overlap_match_to_agreestat(\n proj, unit_name, extent=[], category_names=[\"polarity_sentiment\"]\n):\n def union(data):\n from intervaltree import IntervalTree, Interval\n\n t = IntervalTree().from_tuples((begin, end + 1) for begin, end in data)\n t.merge_overlaps(strict=True)\n return sorted(t.all_intervals)\n\n corpus_dfs = []\n doc_dfs = {cat: [] for cat in category_names}\n\n kf = lambda x: x.title\n for doc_id, docs in groupby(sorted(proj.annotation_documents, key=kf), kf):\n units = [] # collect all units from all annotators across the document group\n for doc in docs:\n units.extend(getattr(doc, unit_name))\n\n data = {\n \"anno_id\": [],\n \"doc_id\": [],\n \"text\": [],\n \"begin\": [],\n \"end\": [],\n }\n\n data[\"unit\"] = units\n\n for u in 
units:\n tokens = u.get_extent_tokens(extent=extent)\n data[\"anno_id\"].append(u.annotator_id)\n data[\"doc_id\"].append(u.document_title.split(\"_\")[0])\n data[\"text\"].append(u.text)\n data[\"begin\"].append(tokens[0].index)\n data[\"end\"].append(tokens[-1].index)\n\n for cat in category_names: # add category label values\n data.setdefault(cat, []).append(getattr(u, cat))\n\n df = pd.DataFrame(data)\n # remove any exactly overlapping annos from same anno (redundant)\n # df = df.drop_duplicates(subset=[\"anno_id\", \"begin\", \"end\", cat])\n # Create a list of intervals\n df[\"begin_end\"] = df[[\"begin\", \"end\"]].apply(list, axis=1)\n intervals = union(df.begin_end)\n\n # Add a group column\n df[\"group_in_doc\"] = df[\"begin\"].apply(\n lambda x: next(g for g, l in enumerate(intervals) if l.contains_point(x))\n )\n\n df[\"group_id\"] = df[\"doc_id\"] + \"_\" + df[\"group_in_doc\"].astype(str)\n\n corpus_dfs.append(df) # add to corpus overview\n\n # make Agreestat output dfs by category label\n for cat in category_names:\n\n # resolve self-overlap, i.e. same group within annotator, in exceptional cases the label is used\n df = resolve_self_overlap(df, cat, extent)\n doc_df = df.pivot(index=\"group_id\", columns=\"anno_id\", values=cat)\n doc_dfs[cat].append(doc_df)\n\n corpus_df = pd.concat(corpus_dfs) # combine all dfs into one corpus-level df\n corpus_df.to_csv(\"all_annotations_alignment_info.csv\", sep=\"\\t\")\n for gn, gdf in corpus_df.groupby(\"group_id\"):\n if len(gdf) > 2 and gdf[\"end\"].is_unique and gdf[\"begin\"].is_unique:\n pass\n print(gdf)\n # write csv's for agreestat\n cat_dfs = {} # for output\n for cat, doc_dfs in doc_dfs.items():\n all_df = pd.concat(doc_dfs) # join all docs into corpus-level overview df\n\n cat_dfs[cat] = all_df\n fp = Path(\n f\"agreestat-iaa-files/{unit_name}-{cat}.csv\"\n ) # missing data as blank (default Agreestat)\n all_df.to_csv(fp, index=False)\n\n # Also output missing data as Missing label\n all_df_missing_labeled = all_df.fillna(\"Missing\")\n fp = Path(\n f\"agreestat-iaa-files/{unit_name}-{cat}-missing-labeled.csv\"\n ) # missing data labeled explicitly as \"Missing\"\n all_df_missing_labeled.to_csv(fp, index=False)\n\n return cat_dfs\n\n\ndef pair_wise_missing_as_label_analysis(project):\n \"\"\"\n NOT USED in publication.\n Uses the NLTK agreement package, which treats a missing label as its own category. > bad approach\n In pairwise manner.\n :param project: WebAnno project parsed\n :return:\n \"\"\"\n\n # extract relevant annotations as document representation. 
(doc_id, anno_id, annotations_to_align)\n data = [\n [d.document_id, d.annotator_id, d.sentiment_expressions]\n for d in project.annotation_documents\n ]\n\n data_iaa = {}\n\n kf = lambda x: x[0] # groupby doc_id\n for doc_id, docs in groupby(sorted(data, key=kf), kf):\n docs = list(docs)\n for d1, d2 in combinations(docs, 2): # pairwise matching\n anno_key = \"-\".join(sorted([d1[1], d2[1]]))\n matches = []\n unmatched = d1[2] + d2[2]\n x_token_ids = [x.get_extent_token_ids(extent=[]) for x in d1[2]]\n y_token_ids = [y.get_extent_token_ids(extent=[]) for y in d2[2]]\n dice_scores = score_dice_pairwise(x_token_ids, y_token_ids)\n\n for (x_idx, y_idx) in zip(*np.nonzero(dice_scores)):\n x = d1[2][x_idx]\n y = d2[2][y_idx]\n match = (x, y)\n try:\n unmatched.remove(x)\n unmatched.remove(y)\n except ValueError as e:\n print(x, y, e)\n matches.append(match)\n\n print(\"--------Matched--------\")\n for m1, m2 in matches:\n print(\n f\"{m1.get_extent_text(extent=[])}.{m1.polarity_sentiment} = {m2.get_extent_text(extent=[])}.{m2.polarity_sentiment}\"\n )\n print(\"--------Unmatched--------\")\n for x in unmatched:\n print(\n f\"{x.get_extent_text(extent=[])}.{x.polarity_sentiment} - {x.annotator_id}\"\n )\n\n # create data for custom anno task\n for i, (m1, m2) in enumerate(matches):\n data_iaa.setdefault(anno_key, []).append(\n (m1.annotator_id, f\"{doc_id}_{i}\", m1.polarity_sentiment)\n )\n data_iaa.setdefault(anno_key, []).append(\n (m2.annotator_id, f\"{doc_id}_{i}\", m2.polarity_sentiment)\n )\n for i, x in enumerate(unmatched):\n data_iaa.setdefault(anno_key, []).append(\n (x.annotator_id, f\"{doc_id}_{i+len(matches)}\", x.polarity_sentiment)\n )\n other_anno = [a for a in [d1[1], d2[1]] if a != x.annotator_id][0]\n data_iaa.setdefault(anno_key, []).append(\n (other_anno, f\"{doc_id}_{i+len(matches)}\", \"NaN\")\n )\n\n # 1.2. alignments = match_alignment(doc1, doc2, criteria=[\"full_overlap\", \"boundary\", \"partial_overlap\"])\n # 1.3. write output file with matched + unmatched units\n\n # 2. 
compute same metrics with 1 reference (not correct handling of missing values)\n for anno_pair, data in data_iaa.items():\n t = CustomAnnotationTask(data)\n results = t.compute_all(average=\"micro\")\n print(anno_pair, results)\n\n\ndef write_agreestat_token(\n project,\n extents=[\n \"event_extent\",\n \"participant_extent\",\n \"filler_extent\",\n \"canonical_referent_extent\",\n \"discontiguous_trigger_extent\",\n \"sentiment_expression_extent\",\n ],\n):\n \"\"\"\n Writes agreestat data file for each type of extent with an entry for each token.\n Extents are identified by attributes on token containing extent.\n :param project:\n :return:\n \"\"\"\n\n def join_extents(data, to_join=(), new_name=\"new\"):\n for anno_id in data[to_join[0]].keys():\n first = data[to_join[0]][anno_id]\n second = data[to_join[1]][anno_id]\n new = [\n new_name if x != None or y != None else None\n for x, y in zip(first, second)\n ]\n data.setdefault(new_name, dict())[anno_id] = new\n for k in to_join:\n data.pop(k, None)\n return data\n\n data = {ext: {} for ext in extents}\n all_tokens = [t for d in project.annotation_documents for t in d.tokens]\n for doc in project.annotation_documents:\n for token in doc.tokens:\n for ext in extents:\n res = getattr(token, ext)\n if res:\n anno = ext.split(\"_\")[0]\n else:\n anno = None\n data[ext].setdefault(token.annotator_id, []).append(anno)\n\n # join discontiguous and unit extents into one trigger annotation\n if \"discontiguous_trigger_extent\" in extents and \"event_extent\" in extents:\n data = join_extents(\n data,\n to_join=(\"event_extent\", \"discontiguous_trigger_extent\"),\n new_name=\"trigger_event\",\n )\n\n for extent_n, annotators in data.items():\n df = pd.DataFrame(annotators)\n dirp = Path(f\"agreestat-iaa-files/token-identification\")\n # df.to_csv(dirp / (extent_n + \".csv\"), index=False)\n df_no_anno = df.fillna(\"no_anno\")\n df_no_anno.to_csv(dirp / (extent_n + \"_no_labeled.csv\"), index=False)\n\n\nif __name__ == \"__main__\":\n\n # # Events: load EVENT IAA STUDY\n event_proj = parse_project(settings.IAA_XMI_DIRP, from_scratch=False)\n event_proj.annotation_documents = [\n d for d in event_proj.annotation_documents if d.annotator_id != \"gilles\"\n ]\n\n categories = [\"event_type\", \"event_fulltype\", \"polarity_negation\", \"modality\"]\n matched_dfs = overlap_match_to_agreestat(\n event_proj,\n \"events\",\n extent=[\"discontiguous_triggers\"],\n category_names=categories,\n )\n\n # naive token approach for comparison:\n write_agreestat_token(\n event_proj,\n extents=[\n \"event_extent\",\n \"participant_extent\",\n \"filler_extent\",\n \"discontiguous_trigger_extent\",\n ],\n )\n\n # Sentiment: load the final SENTIMENT IAA study project and set gold standard\n sent_proj = parse_project(settings.SENTIMENT_IAA)\n sent_proj.annotation_documents = [\n d for d in sent_proj.annotation_documents if d.annotator_id != \"gilles\"\n ]\n\n # match overlap groups\n categories = [\n \"polarity_sentiment\",\n \"polarity_sentiment_scoped\",\n \"negated\",\n \"uncertain\",\n ]\n matched_dfs = overlap_match_to_agreestat(\n sent_proj, \"sentiment_expressions\", extent=[], category_names=categories\n )\n\n # naive token approach for comparison:\n write_agreestat_token(sent_proj, 
extents=[\"sentiment_expression_extent\"])\n","repo_name":"GillesJ/sentivent_webannoparser","sub_path":"span_iaa.py","file_name":"span_iaa.py","file_ext":"py","file_size_in_byte":22078,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"5556715792","text":"from fractions import Fraction\nfrom math import log10\n\ndef f(n, x):\n if n==0:\n return Fraction(3,2)\n \n if n == 1:\n return 1 + Fraction(1, x+2)\n \n return f(n-1, Fraction(1, x + 2))\n\ncount = 0\n\nfor n in range(0,1000):\n if int(log10(f(n,Fraction(1,2)).numerator)) > int(log10(f(n,Fraction(1,2)).denominator)):\n count +=1\nprint(count) \n","repo_name":"amalekan/ProjectEuler","sub_path":"Problem57.py","file_name":"Problem57.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"36157574893","text":"from db_config import db_init as db\n\n# Category 模型类\nclass Tag(db.Model):\n # 表名\n __tablename__ = \"tags\"\n\n # 字段名\n tag_id = db.Column(db.Integer, primary_key=True)\n tag_name = db.Column(db.String(64), unique=True, nullable=False)\n\n def __init__(self, tag_name):\n self.tag_name = tag_name","repo_name":"EuDs63/BookRecommend_Back","sub_path":"models/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"}
+{"seq_id":"74050563094","text":"from __future__ import print_function, division, absolute_import\n\n\nimport numpy as np\nfrom astropy.constants import c\n\n\ndef convolve_with_constant_velocity_kernel(wave, flux, v = 1e4):\n \"\"\"\n Convolve a spectrum with a gaussian kernel of constant velocity\n \n Parameters\n ----------\n wave : float arr\n Wavelength values for each pixel in the spectrum. Velocity \n calculations assume wavelength is given in Ang.\n \n flux : float arr\n Flux values for each pixel in the spectrum. Relative accuracy\n is assumed, but absolute scale is not necessary.\n \n v : float, optional (default = 1e4)\n Full-width half-max velocity of the gaussian kernel. Velocity \n must be in units of km/s.\n \n Returns\n -------\n interp_grid : float arr\n Wavelength (in same units as wave) at each pixel in the \n interpolated grid used for the convolution\n \n conv_flux : float arr\n Flux at each pixel in wave following convolution with the \n constant velocity width kernel\n \"\"\"\n \n deltaAng = np.median(np.diff(wave))\n interp_grid = np.arange(min(wave), max(wave), deltaAng)\n interp_flux = np.interp(interp_grid, wave, flux)\n\n var_kern_fwhm = v*1e3/c.value*interp_grid # 1e3 converts c from m/s to km/s\n conv_flux = np.empty(len(interp_flux))\n\n for pix in range(len(conv_flux)):\n sigmaKern = var_kern_fwhm[pix]/(2*np.sqrt(2*np.log(2)))\n gx = np.arange(-4*sigmaKern, 4*sigmaKern, deltaAng)\n kern = deltaAng/np.sqrt(2*np.pi*sigmaKern**2)*np.exp(-1./2*(gx/sigmaKern)**2)\n gauss_flux = np.convolve(interp_flux, kern, mode = 'same')\n conv_flux[pix] = gauss_flux[pix]\n \n return interp_grid, conv_flux\n","repo_name":"yaoyuhan/SN2019dge","sub_path":"playground/helper/specconvolve.py","file_name":"specconvolve.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"15380262303","text":"# ---------------------------------------- Tribonacci non-divisors -------------------------------------------- #\n# #\n# The sequence 1, 1, 1, 3, 5, 9, 17, 31, 57, 105, 193, 355, 653, 1201 ... #\n# is defined by T1 = T2 = T3 = 1 and Tn = Tn-1 + Tn-2 + Tn-3. #\n# #\n# It can be shown that 27 does not divide any terms of this sequence. #\n# In fact, 27 is the first odd number with this property. #\n# #\n# Find the 124th odd number that does not divide any terms of the above sequence. #\n# ------------------------------------------------------------------------------------------------------------- #\nimport time\n\ndef gen_tribonacci_sequence_mod_m(m):\n t1 = 1\n t2 = 1\n t3 = 1\n\n yield t1\n yield t2\n yield t3\n\n while True:\n t1, t2, t3 = t2, t3, (t1 + t2 + t3) % m\n\n yield t3\n\ndef gen_next_tribonacci_odd_non_divisor():\n d = 3\n\n while True:\n g = gen_tribonacci_sequence_mod_m(d)\n\n t1 = next(g)\n t2 = next(g)\n t3 = next(g)\n \n while True:\n t1, t2, t3 = t2, t3, next(g)\n \n if t3 == 0:\n break\n elif t1 * t2 * t3 == 1:\n yield d\n break\n \n d += 2\n\ndef eu225():\n TARGET = 124\n\n g = gen_next_tribonacci_odd_non_divisor()\n \n for i in range(TARGET):\n d = next(g)\n \n return d\n\nif __name__ == \"__main__\":\n startTime = time.clock()\n print (eu225())\n elapsedTime = time.clock() - startTime\n print (\"Time spent in (\", __name__, \") is: \", elapsedTime, \" sec\")\n","repo_name":"sefi-roee/ProjectEuler","sub_path":"eu225.py","file_name":"eu225.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"11723185713","text":"from .scene import Scene\nfrom Game_State import Constants, Color\nfrom utils import CellCoord, Instruction\nimport pygame\nimport time\n\n\nclass MazeScene(Scene):\n def __init__(self, game_state):\n self.game_state = game_state\n self.instructions = []\n self.counter = 0\n self.coord = CellCoord(0, 0)\n self.alg = None\n\n def initialize(self):\n self.game_state.next_scene = None\n self.reset()\n self.make_walls()\n self.alg.run()\n print(\"finished algorithm\")\n\n def reset(self):\n self.counter = 0\n self.instructions.clear()\n self.coord = CellCoord(0, 0)\n\n def process_input(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n\n if self.counter < len(self.instructions) - 1:\n continue\n if event.type == pygame.MOUSEBUTTONDOWN:\n # click a checkbox\n mouse_pos = pygame.mouse.get_pos()\n self.game_state.click_checkbox(mouse_pos)\n\n # click button\n self.game_state.click_button(mouse_pos)\n\n if event.type == pygame.MOUSEMOTION:\n mouse_pos = pygame.mouse.get_pos()\n self.game_state.button.is_over(mouse_pos)\n for checkbox in self.game_state.checkbox.values():\n checkbox.is_over(mouse_pos)\n\n def update(self):\n if self.counter == len(self.instructions):\n # print('instructions empty')\n return\n else:\n instruction = self.instructions[self.counter]\n instruction.run()\n self.counter += 1\n if self.counter >= Constants.NODE_LENGTH_COUNT * Constants.NODE_WIDTH_COUNT:\n time.sleep(0.05)\n\n def render(self):\n self.game_state.render()\n\n def make_walls(self):\n while self.coord.row <= Constants.NODE_LENGTH_COUNT - 1:\n curr_node = self.game_state.maze[self.coord.row][self.coord.col]\n state = \"\"\n # first row and last row\n if self.coord.row == 0 or self.coord.row == Constants.NODE_LENGTH_COUNT - 1:\n state = \"wall\"\n # if row is even\n elif self.coord.row % 2 == 0:\n state = \"wall\"\n # if row is odd and col is even\n elif self.coord.row % 2 == 1 and self.coord.col % 2 == 0:\n state = \"wall\"\n elif self.coord.row % 2 == 1 and self.coord.col % 2 == 1:\n state = \"path\"\n\n curr_node.state = state\n curr_node.status = \"unvisited\"\n if state == \"path\":\n curr_node.setup_neighbours()\n\n self.instructions.append(\n Instruction(curr_node, Color.WALL, state=state, status=\"unvisited\")\n )\n\n if self.coord.col >= Constants.NODE_WIDTH_COUNT - 1:\n self.coord.row += 1\n self.coord.col = 0\n else:\n self.coord.col += 1\n # print(len(self.instructions))\n\n","repo_name":"william1357chen/Maze-Generator-and-Pathfinder","sub_path":"Scene/maze_scene.py","file_name":"maze_scene.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"10908266425","text":"# -*- coding:utf-8 -*- \r\nfrom requests_oauthlib import OAuth1Session\r\nimport json\r\n \r\noath_key_dict = {\r\n \"consumer_key\": \"8TKebUW9PSm5yMtCrQF9dM7H0\",\r\n \"consumer_secret\": \"3GzoLxTgEYSMVVCnY0pSdhaZlRaBghaqoceN0AZ0XYlEiunjRk\",\r\n \"access_token\": \"725947432631033856-QHkFkYaSxWyXLCH0vPtJUWgKSCtxn9p\",\r\n \"access_token_secret\": \"WNUU9028nX60nG2zhNZnVdU4kA76MyecNiddTjZEWbarh\"\r\n}\r\n\r\ndef searchtweet(geofile,searchstr,strlang,apdist):\r\n #読込むファイル名を設定\r\n geo_fname = r\"'\"+ geofile + \"'\"\r\n geo_fname = geo_fname.replace(\"'\",\"\")\r\n\r\n with open(geo_fname, 'r',encoding=\"utf-8\") as f:\r\n reader = f.readline()\r\n #UnicodeEncodeErrorを避けるため\r\n s = reader.encode('cp932', \"ignore\")\r\n reader_after = s.decode('cp932')\r\n #print(reader_after)\r\n while reader_after:\r\n d = reader_after.split(\",\")\r\n fname = d[0].strip()\r\n latitude = d[1].strip()\r\n longtude = d[2].strip()\r\n tweets = tweet_search(searchstr,strlang,str(latitude),str(longtude),str(apdist),oath_key_dict)\r\n \r\n out_tweets(tweets,fname)\r\n\r\n reader = f.readline()\r\n #UnicodeEncodeErrorを避けるため\r\n s = reader.encode('cp932', \"ignore\")\r\n reader_after = s.decode('cp932')\r\n #print(reader_after)\r\n return\r\n\r\ndef create_oath_session(oath_key_dict):\r\n oath = OAuth1Session(\r\n oath_key_dict[\"consumer_key\"],\r\n oath_key_dict[\"consumer_secret\"],\r\n oath_key_dict[\"access_token\"],\r\n oath_key_dict[\"access_token_secret\"]\r\n )\r\n return oath\r\n\r\ndef tweet_search(search_word,str_lang,latitude,longtude,ap_dist,oath_key_dict):\r\n #print(search_word)\r\n #print(str_lang)\r\n #print(latitude)\r\n #print(longtude)\r\n #print(ap_dist)\r\n\r\n if not str_lang:\r\n str_lang = \"ja\"\r\n if not ap_dist:\r\n ap_dist = \"7\"\r\n ap_dist = ap_dist+\"mi\"\r\n\r\n url = \"https://api.twitter.com/1.1/search/tweets.json?\"\r\n params = {\r\n \"q\": search_word,\r\n\t\"geocode\": \"\\\"\"+latitude+\",\"+longtude+\",\"+ap_dist+\"\\\"\",\r\n\t#\"geocode\": \"42.786787000000004,141.68293158837253,7mi\",\r\n \"lang\": str_lang,\r\n \"result_type\": \"recent\",\r\n \"count\": \"100\"\r\n }\r\n\r\n oath = create_oath_session(oath_key_dict)\r\n responce = oath.get(url, params = params)\r\n if responce.status_code != 200:\r\n print (\"Error code: %d\" %(responce.status_code))\r\n return None\r\n tweets = json.loads(responce.text)\r\n return tweets\r\n\r\ndef out_tweets(get_tweets,airport_fname):\r\n #出力ファイル名\r\n fname = r\"'\"+ airport_fname+\"_tweet.txt\"+ \"'\"\r\n fname = fname.replace(\"'\",\"\")\r\n #print(fname)\r\n #ファイルへ出力\r\n with open(fname, \"w\",encoding=\"utf-8\") as f1:\r\n for tweet in get_tweets[\"statuses\"]:\r\n tweet_id = tweet[u'id_str']\r\n text = tweet[u'text']\r\n created_at = tweet[u'created_at']\r\n user_id = tweet[u'user'][u'id_str']\r\n user_description = tweet[u'user'][u'description']\r\n screen_name = tweet[u'user'][u'screen_name']\r\n user_name = tweet[u'user'][u'name']\r\n\r\n f1.write('\\n')\r\n f1.write(tweet_id)\r\n f1.write('\\n')\r\n\r\n #UnicodeEncodeErrorを避けるため\r\n before_text = text.encode('cp932', \"ignore\")\r\n after_text = before_text.decode('cp932')\r\n f1.write(after_text)\r\n f1.write('\\n')\r\n\r\n f1.write(created_at)\r\n f1.write('\\n')\r\n f1.write(user_id)\r\n f1.write('\\n')\r\n\r\n #UnicodeEncodeErrorを避けるため\r\n before_user = user_description.encode('cp932', \"ignore\")\r\n after_user = before_user.decode('cp932')\r\n f1.write(after_user)\r\n f1.write('\\n')\r\n\r\n f1.write(screen_name)\r\n 
f1.write('\\n')\r\n\r\n #To avoid UnicodeEncodeError\r\n before_uname = user_name.encode('cp932', \"ignore\")\r\n after_uname = before_uname.decode('cp932')\r\n f1.write(after_uname)\r\n f1.write('\\n')\r\n return\r\n\r\nif __name__ == '__main__':\r\n #Enter the Peach airport geodata file name\r\n print ('====== Enter Peach Airport Geodata file =====')\r\n geofile = input('> ')\r\n\r\n #Enter the search keyword\r\n print ('====== Enter Search String =====')\r\n searchstr = input('> ')\r\n #Enter the search language\r\n print ('====== Enter Search Language (ja: Japanese, en: English) =====')\r\n strlang = input('> ')\r\n #Enter the distance from the airport (miles)\r\n print ('====== Enter Distance from Airport (unit: mile) =====')\r\n apdist = input('> ')\r\n\r\n searchtweet(geofile,searchstr,strlang,apdist)\r\n\r\n","repo_name":"NorikoEtani/PEACH","sub_path":"search_tweet.py","file_name":"search_tweet.py","file_ext":"py","file_size_in_byte":4803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"11602060388","text":"from typing import Dict, Tuple, List, Union\nfrom dataclasses import dataclass\nimport xml.etree.ElementTree as ET\nimport ctypes\nimport collections\n\n\n@dataclass\nclass ProcessorSpec:\n programcounter: str = None\n\n # TODO: properties\n # TODO: data_space\n # TODO: inferptrbounds\n # TODO: segmented_address\n # TODO: segmentop_type\n # TODO: context_data\n # TODO: volatile\n # TODO: incidentalcopy\n # TODO: jumpassist\n # TODO: register_data\n # TODO: default_symbols\n # TODO: default_memory_blocks\n\n def xml(self):\n f = ET.Element(\"processor_spec\")\n if self.programcounter is not None:\n e = ET.Element(\"programcounter\",\n attrib=dict(register=self.programcounter))\n f.append(e)\n return f\n\n def __str__(self):\n return ET.tostring(self.xml()).decode('ascii')\n\n\n@dataclass\nclass RegisterType:\n name: str\n\n def xml(self):\n return ET.Element(\"register\", attrib=dict(name=self.name))\n\n\n@dataclass\nclass AddrType:\n space: str\n offset: int = None\n piece1: int = None\n piece2: int = None\n piece3: int = None\n piece4: int = None\n\n def xml(self):\n f = ET.Element(\"addr\", attrib=dict(space=self.space))\n if self.offset is not None:\n f.attrib[\"offset\"] = str(self.offset)\n if self.piece1 is not None:\n f.attrib[\"piece1\"] = str(self.piece1)\n if self.piece2 is not None:\n f.attrib[\"piece2\"] = str(self.piece2)\n if self.piece3 is not None:\n f.attrib[\"piece3\"] = str(self.piece3)\n if self.piece4 is not None:\n f.attrib[\"piece4\"] = str(self.piece4)\n return f\n\n def fromxml(doc):\n assert doc.tag == \"addr\"\n space = doc.attrib['space']\n offset = None\n piece1 = None\n piece2 = None\n piece3 = None\n piece4 = None\n if \"offset\" in doc.attrib:\n offset = int(doc.attrib['offset'], 16)\n if \"piece1\" in doc.attrib:\n piece1 = int(doc.attrib['piece1'], 16)\n if \"piece2\" in doc.attrib:\n piece2 = int(doc.attrib['piece2'], 16)\n if \"piece3\" in doc.attrib:\n piece3 = int(doc.attrib['piece3'], 16)\n if \"piece4\" in doc.attrib:\n piece4 = int(doc.attrib['piece4'], 16)\n if \"size\" in doc.attrib:\n size = int(doc.attrib['size'], 16)\n return AddrType(\n space=space,\n offset=offset,\n piece1=piece1,\n piece2=piece2,\n piece3=piece3,\n piece4=piece4,\n )\n\n def __str__(self):\n return ET.tostring(self.xml()).decode('ascii')\n\n\n@dataclass\nclass RangeType:\n space: str\n first: int = None\n last: int = None\n\n def xml(self):\n f = ET.Element(\"range\", attrib=dict(space=self.space))\n f.attrib['space'] = self.space\n if self.first is not None and self.last is not None:\n f.attrib['first'] = str(self.first)\n f.attrib['last'] = str(self.last)\n return f\n\n\n@dataclass\nclass Pentry:\n entry: Union[RegisterType, AddrType]\n maxsize: int = None\n minsize: int = None\n align: int = None\n metatype: str = None\n extension: str = None\n\n # TODO trial\n\n def xml(self):\n f = ET.Element(\"pentry\")\n if self.minsize is not None:\n f.attrib['minsize'] = str(self.minsize)\n if self.maxsize is not None:\n f.attrib['maxsize'] = str(self.maxsize)\n if self.align is not None:\n f.attrib['align'] = str(self.align)\n if self.metatype is not None:\n f.attrib['metatype'] = self.metatype\n if self.extension is not None:\n f.attrib['extension'] = self.extension\n f.append(self.entry.xml())\n return f\n\n\n@dataclass\nclass VarnodeType:\n space: str\n offset: int\n size: int\n\n def xml(self):\n return ET.Element(\"varnode\",\n attrib=dict(space=self.space,\n offset=str(self.offset),\n size=str(self.size)))\n\n\n@dataclass\nclass Prototype:\n 
@dataclass\n class Input:\n pentry: List[Pentry]\n\n # TODO: pointermax\n # TODO: thisbeforetpointer\n # TODO: killedbycall\n\n def xml(self):\n f = ET.Element(\"input\")\n for i in self.pentry:\n f.append(i.xml())\n return f\n\n @dataclass\n class Output:\n pentry: List[Pentry]\n\n # TODO: killedbycall\n\n def xml(self):\n f = ET.Element(\"output\")\n for i in self.pentry:\n f.append(i.xml())\n return f\n\n extrapop: int\n stackshift: int\n name: int\n input: Input\n output: Output\n unaffected: List[Union[RegisterType, VarnodeType]] = None\n killedbycall: List[Union[RegisterType, VarnodeType]] = None\n likelytrash: List[Union[RegisterType, VarnodeType]] = None\n\n # TODO: pcode\n # TODO: localrange\n # TODO: type_\n # TODO: strategy\n # TODO: hasthis\n # TODO: constructor\n # TODO: returnaddress\n\n def xml(self):\n f = ET.Element(\"prototype\",\n attrib=dict(extrapop=str(self.extrapop),\n stackshift=str(self.stackshift),\n name=self.name))\n f.append(self.input.xml())\n f.append(self.output.xml())\n if self.unaffected is not None:\n e = ET.Element(\"unaffected\")\n for i in self.unaffected:\n e.append(i.xml())\n f.append(e)\n if self.killedbycall is not None:\n e = ET.Element(\"killedbycall\")\n for i in self.killedbycall:\n e.append(i.xml())\n f.append(e)\n if self.likelytrash is not None:\n e = ET.Element(\"likelytrash\")\n for i in self.likelytrash:\n e.append(i.xml())\n f.append(e)\n return f\n\n\n@dataclass\nclass CompilerSpec:\n @dataclass\n class StackPointer:\n register: str\n space: str\n growth: str = None\n reversejustify: bool = None\n\n def xml(self):\n f = ET.Element(\"stackpointer\")\n f.attrib['register'] = self.register\n f.attrib['space'] = self.space\n if self.growth is not None:\n f.attrib['growth'] = self.growth\n if self.reversejustify is not None:\n f.attrib['reversejustify'] = self.growth\n return f\n\n @dataclass\n class SpaceBase:\n name: str\n register: str\n space: str\n\n def xml(self):\n f = ET.Element(\"spacebase\")\n f.attrib['name'] = self.name\n f.attrib['register'] = self.register\n f.attrib['space'] = self.space\n return f\n\n @dataclass\n class Global:\n memory_tags_type: List[Union[RegisterType, RangeType]]\n\n def xml(self):\n f = ET.Element(\"global\")\n for i in self.memory_tags_type:\n f.append(i.xml())\n return f\n\n @dataclass\n class DeadcodeDelay:\n space: str\n delay: int\n\n def xml(self):\n return ET.Element(\"deadcodedelay\",\n attrib=dict(space=self.space,\n delay=str(self.delay)))\n\n default_proto: Prototype\n prototype: List[Prototype] = None\n stackpointer: StackPointer = None\n spacebase: List[SpaceBase] = None\n global_: Global = None\n deadcodedelay: List[DeadcodeDelay] = None\n\n # TODO: properties_type\n # TODO: data_organization\n # TODO: callfixup\n # TODO: callotherfixup\n # TODO: context_data\n # TODO: enum\n # TODO: prefersplit\n # TODO: aggressivetrim\n # TODO: nohighptr\n # TODO: returnaddress\n # TODO: funcptr\n # TODO: inferptrbounds\n # TODO: segmentop_type\n # TODO: resolveprototype\n # TODO: eval_current_prototype\n # TODO: eval_called_prototype\n\n def xml(self):\n f = ET.Element(\"compiler_spec\")\n if self.stackpointer is not None:\n f.append(self.stackpointer.xml())\n if self.global_ is not None:\n f.append(self.global_.xml())\n if self.default_proto is not None:\n e = ET.Element(\"default_proto\")\n e.append(self.default_proto.xml())\n f.append(e)\n if self.spacebase is not None:\n for i in self.spacebase:\n f.append(i.xml())\n if self.prototype is not None:\n e = ET.Element(\"prototypes\")\n for i in 
self.prototype:\n e.append(i.xml())\n f.append(e)\n if self.deadcodedelay is not None:\n for i in self.deadcodedelay:\n f.append(i.xml())\n return f\n\n def __str__(self):\n return ET.tostring(self.xml()).decode('ascii')\n\n\n@dataclass\nclass Space:\n name: str\n index: int\n size: int\n bigendian: bool\n delay: int\n physical: bool\n global_: bool\n\n def xml(self):\n f = ET.Element(\"space\",\n attrib=dict(\n name=self.name,\n index=str(self.index),\n size=str(self.size),\n bigendian=str(self.bigendian).lower(),\n delay=str(self.delay),\n physical=str(self.physical).lower(),\n ))\n f.attrib['global'] = str(self.global_).lower()\n return f\n\n\n@dataclass\nclass OtherSpace(Space):\n def xml(self):\n f = ET.Element(\"space_other\",\n attrib=dict(\n name=self.name,\n index=str(self.index),\n size=str(self.size),\n bigendian=str(self.bigendian).lower(),\n delay=str(self.delay),\n physical=str(self.physical).lower(),\n ))\n f.attrib['global'] = str(self.global_).lower()\n return f\n\n\n@dataclass\nclass UniqueSpace(Space):\n def xml(self):\n f = ET.Element(\"space_unique\",\n attrib=dict(\n name=self.name,\n index=str(self.index),\n size=str(self.size),\n bigendian=str(self.bigendian).lower(),\n delay=str(self.delay),\n physical=str(self.physical).lower(),\n ))\n f.attrib['global'] = str(self.global_).lower()\n return f\n\n\n@dataclass\nclass SleighSpec:\n @dataclass\n class Spaces:\n defaultspace: str\n spaces: List[Union[Space, OtherSpace, UniqueSpace]]\n\n def xml(self):\n f = ET.Element(\"spaces\",\n attrib=dict(defaultspace=self.defaultspace))\n for i in self.spaces:\n f.append(i.xml())\n return f\n\n bigendian: bool\n uniqbase: int\n spaces: Spaces\n\n def xml(self):\n f = ET.Element(\"sleigh\",\n attrib=dict(bigendian=str(self.bigendian),\n uniqbase=hex(self.uniqbase)))\n f.append(self.spaces.xml())\n return f\n\n def __str__(self):\n return ET.tostring(self.xml()).decode('ascii')\n\n\n@dataclass\nclass TrackedPointSet:\n space: str\n offset: int\n addrs: List[Tuple[AddrType, int, int]]\n\n def xml(self):\n f = ET.Element(\"tracked_pointset\",\n attrib=dict(space=self.space, offset=str(self.offset)))\n for (a, s, v) in self.addrs:\n r = a.xml()\n r.attrib['size'] = str(s)\n r.attrib['val'] = str(v)\n f.append(r)\n return f\n\n\n@dataclass\nclass Label:\n name: str\n addr: int\n\n def __post_init__(self):\n self.label = \"{}_{}\".format(self.name, hex(self.addr))\n\n\n@dataclass\nclass SpaceId:\n space: str\n\n\n@dataclass\nclass VarNode:\n size: int\n space: str\n offs: int\n\n def __post_init__(self):\n self.offs = ctypes.c_uint(self.offs).value\n\n def __str__(self):\n return \"({}, {}, {})\".format(self.space, hex(self.offs),\n hex(self.size))\n\n def __hash__(self):\n return hash(self.size) ^ hash(self.space) ^ hash(self.offs)\n\n\n@dataclass\nclass PcodeOp:\n op: int\n inrefs: List[Union[SpaceId, VarNode, Label]]\n outref: VarNode = None\n\n\nclass Ast:\n def __init__(self, r):\n funcs = r.findall(\"function\")\n self.name = funcs[0].attrib['name']\n addrs = r.findall(\"function/ast/varnodes/addr\")\n self.vn = dict()\n for i in addrs:\n space = i.attrib['space']\n ref = int(i.attrib['ref'], 16)\n offs = int(i.attrib['offset'], 16)\n size = int(i.attrib['size'])\n if space == \"const\":\n space = \"constant\"\n self.vn[ref] = VarNode(size, space, offs)\n\n def xml2opnd(r):\n if r.tag == \"void\":\n return None\n if r.tag == \"addr\":\n return self.vn[int(r.attrib['ref'], 16)]\n if r.tag == \"spaceid\":\n return SpaceId(r.attrib[\"name\"])\n if r.tag == \"iop\":\n return 
VarNode(0, \"iop\", int(r.attrib['value'], 16))\n raise Exception(\"NYI tag {}\".format(ET.tostring(r)))\n\n self.blocks = dict()\n block = r.findall(\"function/ast/block\")\n for i in block:\n pcode = []\n for ops in i.findall(\"op\"):\n op = int(ops.attrib['code'])\n opnd = list(ops)\n seqnum = (opnd[0].attrib['offset'], opnd[0].attrib['space'],\n opnd[0].attrib['uniq'])\n outref = xml2opnd(opnd[1])\n inrefs = list(map(xml2opnd, opnd[2:]))\n pcode.append(PcodeOp(op, inrefs, outref))\n self.blocks[int(i.attrib['index'])] = (pcode, seqnum)\n\n self.edges = collections.defaultdict(lambda: set())\n edges = r.findall(\"function/ast/blockedge\")\n for i in edges:\n idx = int(i.attrib['index'])\n for j in i.findall(\"edge\"):\n end = int(j.attrib['end'])\n rev = int(j.attrib['rev'])\n self.edges[end].add((idx, rev))\n","repo_name":"toshipiazza/ghidra_fun","sub_path":"python/src/ghidra_types.py","file_name":"ghidra_types.py","file_ext":"py","file_size_in_byte":14413,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
+{"seq_id":"29049846926","text":"# -*- coding: utf-8 -*-\n\nimport pytest\n\nfrom bamboo_engine.builder import * # noqa\nfrom bamboo_engine.engine import Engine\nfrom eri_chaos.runtime import ChoasBambooDjangoRuntime\n\nfrom ..utils import * # noqa\n\n\n@pytest.mark.parametrize(\n \"execute_choas_plans\",\n [\n pytest.param([{\"get_context_key_references\": {\"raise_time\": \"pre\"}}], id=\"get_context_key_references_raise\"),\n pytest.param([{\"get_context_values\": {\"raise_time\": \"pre\"}}], id=\"get_context_values_raise\"),\n pytest.param([{\"fork\": {\"raise_time\": \"pre\"}}], id=\"fork_raise\"),\n pytest.param([{\"set_state\": {\"raise_time\": \"pre\", \"raise_call_time\": 4}}], id=\"pre_set_state_raise\"),\n pytest.param([{\"set_state\": {\"raise_time\": \"post\", \"raise_call_time\": 4}}], id=\"post_set_state_raise\"),\n ],\n)\ndef test(execute_choas_plans):\n start = EmptyStartEvent()\n cpg = ConditionalParallelGateway(\n conditions={\n 0: \"'${a}' == '1_2'\",\n 1: \"'${b}' == '1'\",\n 2: \"'${c}' == '2'\",\n 3: \"True == False\",\n 4: \"True == False\",\n }\n )\n acts = [ServiceActivity(component_code=\"debug_node\") for _ in range(5)]\n cg = ConvergeGateway()\n end = EmptyEndEvent()\n\n start.extend(cpg).connect(*acts).converge(cg).extend(end)\n\n pipeline_data = Data()\n pipeline_data.inputs[\"${a}\"] = Var(type=Var.SPLICE, value=\"${b}_${c}\")\n pipeline_data.inputs[\"${b}\"] = Var(type=Var.PLAIN, value=\"1\")\n pipeline_data.inputs[\"${c}\"] = Var(type=Var.PLAIN, value=\"2\")\n\n pipeline = build_tree(start, data=pipeline_data)\n engine = Engine(\n ChoasBambooDjangoRuntime(stage=\"start\", execute_choas_plans=execute_choas_plans, schedule_choas_plans=[])\n )\n engine.run_pipeline(pipeline=pipeline, root_pipeline_data={})\n\n node_id_list = [pipeline[\"id\"], start.id, cpg.id, acts[0].id, acts[1].id, acts[2].id, cg.id, end.id]\n node_data_dict = {\n a.id: {\"inputs\": {\"_loop\": 1, \"_inner_loop\": 1}, \"outputs\": {\"_loop\": 1, \"_inner_loop\": 1, \"_result\": True}}\n for a in acts[:3]\n }\n node_data_dict[pipeline[\"id\"]] = {\"inputs\": {}, \"outputs\": {}}\n\n assert_all_finish(node_id_list)\n assert_not_executed([acts[3].id, acts[4].id])\n assert_exec_data_equal(node_data_dict)\n for a in acts[:3]:\n assert_schedule_finish(a.id, times=1)","repo_name":"TencentBlueKing/bamboo-engine","sub_path":"runtime/bamboo-pipeline/test/eri_imp_test_use/tests/chaos/test_conditional_parallel_gateway.py","file_name":"test_conditional_parallel_gateway.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","stars":121,"dataset":"github-code","pt":"67"}
+{"seq_id":"41958734742","text":"from urllib import request\nfrom urllib import parse\nfrom http.cookiejar import CookieJar\nheaders = {\n 'User-Agent': \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0\",\n}\ndef get_opner():\n cookiejar = CookieJar()\n headler = request.HTTPCookieProcessor(cookiejar)\n opener = request.build_opener(headler)\n return opener\ndef get_header(opener):\n data = {\n 'email': '924255352@qq.com',\n 'password': '15070640127ling'\n }\n login_url = \"http://www.renren.com/PLogin.do\"\n req = request.Request(login_url,data=parse.urlencode(data).encode('utf-8'),headers=headers)\n opener.open(req)\ndef visit_profile(opener):\n dapen_url = \"http://www.renren.com/880151247/profile\"\n req = request.Request(dapen_url,headers=headers)\n resp = opener.open(req)\n with open('renren.html','w',encoding='utf-8') as fp:\n fp.write(resp.read().decode('utf-8'))\nif __name__ == '__main__':\n opener = get_opner()\n get_header(opener)\n visit_profile(opener)","repo_name":"kingle666/python","sub_path":"test/cookie.py","file_name":"cookie.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"11153430402","text":"\"\"\"Script to simulate studies with Autodesk Moldflow\n\nCommand structure:\n >>> python run_simulations \n\"\"\"\n\nimport os\nimport subprocess\nfrom argparse import ArgumentParser\nimport pickle\n\nimport pythoncom\nimport win32com.client\nfrom tqdm import tqdm\nimport madcad\n\nfrom study import Study\n\n# Path to Autodesk Moldflow\nMF = os.path.join(\"C:/\", \"Program Files\", \"Autodesk\", \"Moldflow Insight 2021.1\", \"bin\")\n\n# Define outputs\nOUT = {\n \"1610\": \"fill_time\",\n \"1653\": \"weld_surface\",\n \"1722\": \"weld_line\",\n}\n\n\nparser = ArgumentParser(\n description='Loads pickled Study Objects and simulates them with Moldflow'\n)\n\nparser.add_argument(\n \"-i\", \"--input_dir\",\n dest=\"input_dir\",\n type=str,\n help=\"Directory containing pickled Study Objects.\",\n default=os.getcwd(),\n required=False\n )\n\nparser.add_argument(\n \"-o\", \"--output_dir\",\n dest=\"output_dir\",\n type=str,\n help=\"Directory to place the output files in.\",\n default=os.getcwd(),\n required=False\n)\nargs = parser.parse_args()\n\n# verify arguments\nif not os.path.isdir(args.input_dir):\n raise ValueError(' must be a path to a valid directory.')\nif not os.path.isdir(args.output_dir):\n raise ValueError(' must be a path to a valid directory.')\n\n# unpickle all study objects in input_dir\nfile_names = os.listdir(os.path.abspath(args.input_dir))\npickle_file_names = filter(lambda fn: fn.endswith(\".pickle\"), file_names)\npickle_file_paths = [os.path.abspath(os.path.join(args.input_dir, pfn)) for pfn in pickle_file_names]\npickle_file_paths = sorted(pickle_file_paths)\n\n# perform simulation with moldflow\nfor pfp in tqdm(pickle_file_paths):\n with open(pfp, \"rb\") as pf:\n obj = pickle.load(pf)\n if not isinstance(obj, Study):\n continue\n s = obj\n\n # Create working directory\n path = os.path.abspath(os.path.join(args.output_dir, s.name))\n os.mkdir(path)\n\n # Export geometry (needs numpy-stl)\n geo_name = f\"{s.name}.stl\"\n madcad.write(s.geometry, os.path.join(path, geo_name))\n\n # Connect to Moldflow Synergy\n Synergy = win32com.client.Dispatch(\"synergy.Synergy\")\n Synergy.SetUnits(\"Metric\")\n\n # Create project\n Synergy.NewProject(s.name, path)\n\n # Loop through injection locations\n for location, direction in s.injection_locations:\n # Import stl file\n ImpOpts = Synergy.ImportOptions\n ImpOpts.MeshType = \"3D\"\n ImpOpts.Units = \"mm\"\n Synergy.ImportFile(f\"{s.name}.stl\", ImpOpts, False)\n\n # Rename study\n study_name = f\"{s.name}_{int(location[0])}_{int(location[1])}_study\"\n Project = Synergy.Project\n Project.RenameItemByName(f\"{s.name}_study\", \"Study\", study_name)\n\n # Set injection location\n BoundaryConditions = Synergy.BoundaryConditions\n Direction = Synergy.CreateVector\n Direction.SetXYZ(*direction)\n Location = Synergy.CreateVector\n Location.SetXYZ(*location)\n EntList = BoundaryConditions.CreateNDBCAtXYZ(\n Location, Direction, 40000, pythoncom.Nothing\n )\n\n # Build mesh\n MeshGenerator = Synergy.MeshGenerator\n MeshGenerator.EdgeLength = 2.5\n MeshGenerator.Generate\n\n # Set number of intermediate results\n PropEd = Synergy.PropertyEditor\n Prop = PropEd.FindProperty(10080, 1)\n DVec = Synergy.CreateDoubleArray\n DVec.AddDouble(50)\n Prop.FieldValues(910, DVec)\n PropEd.CommitChanges(\"Process Conditions\")\n\n # Save the sdy files\n StudyDoc = Synergy.StudyDoc\n StudyDoc.Save\n\n # Save mesh as Patran file\n Project = Synergy.Project\n Project.ExportModel(os.path.join(path, study_name + 
\".pat\"))\n\n # Run the simulation\n p = subprocess.Popen(\n [os.path.join(MF, \"runstudy.exe\"), study_name + \".sdy\",],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n cwd=path,\n )\n (output, err) = p.communicate()\n with open(os.path.join(path, study_name + \".log\"), \"w\") as file:\n file.write(output.decode(\"windows-1252\").strip())\n\n for key, value in OUT.items():\n p = subprocess.Popen(\n [os.path.join(MF, \"studyrlt.exe\"), study_name + \".sdy\", \"-xml\", key,],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n cwd=path,\n )\n (output, err) = p.communicate()\n temp_name = os.path.join(path, f\"{study_name}.xml\")\n os.rename(temp_name, temp_name.replace(\".xml\", f\"_{value}.xml\"))\n","repo_name":"J-Zoll/gnn_mold_filling_simulation","sub_path":"run_simulations.py","file_name":"run_simulations.py","file_ext":"py","file_size_in_byte":4631,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
+{"seq_id":"12403408776","text":"#Date: 27-07-2023\r\n#Day : Thursday\r\n\r\n\r\n\r\n#class creation:\r\n\r\n#syntax - \r\n# class class_name():\r\n\r\nclass vehicle():\r\n def __init__(self,model,color,speed):\r\n self.model=model\r\n self.color=color\r\n self.speed=speed\r\n\r\n def details(self):\r\n return {\r\n \"Brand\" :self.model,\r\n \"Color\" :self.color,\r\n \"Max_speed\" :f\"{self.speed} kmph\"\r\n } \r\n \r\n\r\n\r\n#Audi car:\r\n\r\naudi=vehicle(\"Audi\",\"White\",200)\r\nprint(audi.details())\r\n\r\n#BMW car:\r\n\r\nBMW=vehicle(\"BMW\",\"Black\",259)\r\nprint(BMW.details())\r\n\r\n","repo_name":"Anandhakumar123456/my_projects","sub_path":"workout/July 2023/W_27_07_01.py","file_name":"W_27_07_01.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"42955452305","text":"from ..GPU_np import np\nfrom ..base.Tensor import Tensor\n\n\ndef average(x: Tensor, axis=None, keepdims=False):\n if axis is None:\n axis = range(len(x.shape))\n if not isinstance(axis, tuple):\n axis = (axis,)\n ret = np.average(x.data, axis=axis, keepdims=keepdims)\n if not x.requires_grad:\n return Tensor(ret)\n\n grad_shape = list(x.shape)\n count_num = 1\n for dim in axis:\n count_num *= grad_shape[dim]\n grad_shape[dim] = 1\n grad_shape = tuple(grad_shape)\n\n def grad_fn(grad):\n return grad.reshape(grad_shape) * np.full(shape=x.shape, fill_value=1.0/count_num)\n\n return Tensor(\n data=ret,\n requires_grad=True,\n depend_on=[(x, grad_fn)]\n )\n\n\ndef mean(x: Tensor, axis=None, keepdim=False):\n return average(x, axis, keepdim)\n","repo_name":"JustinRochester/torch-CNN","sub_path":"mytorch/nptorch/nn/functional/average.py","file_name":"average.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"70098854293","text":"# Matched filter performance in WGN\n# H0: x[n] = w[n]\n# H1: x[n] = s[n] + w[n], n = 0, 1, ..., N-1\n# w[N] is WGN with mean 0 and variance var.\n# s[n] is deterministic.\n\nfrom utils import *\nimport matplotlib.pyplot as plt\n\nN = 1024\nM = 1000\n\npfa = np.logspace(-1, -1, 1)\nenr_range = np.linspace(0, 20, 50)\nd2 = np.array([10 ** (enr / 10) for enr in enr_range])\n\nfor i in range(pfa.size):\n # generate the deterministic signal.\n\n Ts = 1 / 1000 # sampling period.\n fs = 1 / Ts # sampling frequency\n\n t = np.arange(N) * Ts # continuous time signal.\n\n A = 1e-6 # small signal amplitude.\n s = np.ones(t.shape)\n\n # numerically calculate probability of detection.\n P = np.zeros_like(enr_range)\n for k in range(d2.size):\n # variance corresponding to d2\n var = N * A ** 2 / d2[k]\n\n # determine the threshold corresponding to gamma\n gamma = Qinv(pfa[i]) * np.sqrt(2 * N / var)\n\n # generate the data.\n data = np.random.laplace(scale=np.sqrt(var / 2), size=(M, N)) + A * s\n\n # apply the detector.\n T = np.sqrt(2 / var) * np.sum(np.sign(data), axis=1) # NP detector.\n P[k] = np.where(T > gamma)[0].size / M\n\n # analytically calculate probability of detection.\n Pd = Q(Qinv(pfa[i]) - np.sqrt(d2 * 2))\n\n # plot the results.\n plt.plot(enr_range, P, '*')\n plt.plot(enr_range, Pd)\n\nplt.xlabel(r'$10\\log_{10}\\frac{\\varepsilon}{\\sigma^2}$')\nplt.ylabel(r'$P_D$')\nplt.title(r'$Damped \\; Exponential \\; in \\; WGN$')\nplt.grid()\nplt.show()\n","repo_name":"zekeriyasari/detection","sub_path":"test_cases/dc_level_laplacian.py","file_name":"dc_level_laplacian.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"39738169191","text":"import os\nimport sys\n\nfilename = sys.argv[1]\nwith open(filename) as f:\n content = f.readlines()\n for l in content:\n name = l.split()[0]\n print(name)\n os.system(\"go get \" + name + \"@latest\")","repo_name":"hashicorp/vault","sub_path":"scripts/deps_upgrade.py","file_name":"deps_upgrade.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":28872,"dataset":"github-code","pt":"67"}
+{"seq_id":"70282767575","text":"\"\"\"\nClone a VM from template example\n\"\"\"\nfrom pyVmomi import vim\nfrom ..tools import pchelper\nfrom .add_nic_to_vm import add_nic\n\n\ndef wait_for_task(task):\n \"\"\"wait for a vCenter task to finish\"\"\"\n task_done = False\n while not task_done:\n if task.info.state == \"success\":\n return task.info.result\n\n if task.info.state == \"error\":\n print(\"there was an error\")\n print(task.info.error)\n task_done = True\n\n\ndef clone_vm(\n content,\n template,\n vm_name,\n datacenter_name,\n vm_folder,\n datastore_name,\n cluster_name,\n resource_pool,\n power_on,\n datastorecluster_name,\n):\n \"\"\"\n Clone a VM from a template/VM, datacenter_name, vm_folder, datastore_name\n cluster_name, resource_pool, and power_on are all optional.\n \"\"\"\n\n # if none git the first one\n datacenter = pchelper.get_obj(content, [vim.Datacenter], datacenter_name)\n\n if vm_folder:\n destfolder = pchelper.search_for_obj(content, [vim.Folder], vm_folder)\n else:\n destfolder = datacenter.vmFolder\n\n if datastore_name:\n datastore = pchelper.search_for_obj(content, [vim.Datastore], datastore_name)\n else:\n datastore = pchelper.get_obj(\n content, [vim.Datastore], template.datastore[0].info.name\n )\n\n # if None, get the first one\n cluster = pchelper.search_for_obj(\n content, [vim.ClusterComputeResource], cluster_name\n )\n if not cluster:\n clusters = pchelper.get_all_obj(content, [vim.ResourcePool])\n cluster = list(clusters)[0]\n\n if resource_pool:\n resource_pool = pchelper.search_for_obj(\n content, [vim.ResourcePool], resource_pool\n )\n else:\n resource_pool = cluster.resourcePool\n vmconf = vim.vm.ConfigSpec()\n\n if datastorecluster_name:\n podsel = vim.storageDrs.PodSelectionSpec()\n pod = pchelper.get_obj(content, [vim.StoragePod], datastorecluster_name)\n podsel.storagePod = pod\n storagespec = vim.storageDrs.StoragePlacementSpec()\n storagespec.podSelectionSpec = podsel\n storagespec.type = \"create\"\n storagespec.folder = destfolder\n storagespec.resourcePool = resource_pool\n storagespec.configSpec = vmconf\n try:\n rec = content.storageResourceManager.RecommendDatastores(\n storageSpec=storagespec\n )\n rec_action = rec.recommendations[0].action[0]\n real_datastore_name = rec_action.destination.name\n except Exception:\n real_datastore_name = template.datastore[0].info.name\n datastore = pchelper.get_obj(content, [vim.Datastore], real_datastore_name)\n relospec = vim.vm.RelocateSpec()\n relospec.datastore = datastore\n relospec.pool = resource_pool\n clonespec = vim.vm.CloneSpec()\n clonespec.location = relospec\n clonespec.powerOn = power_on\n print(\"cloning VM...\")\n task = template.Clone(folder=destfolder, name=vm_name, spec=clonespec)\n wait_for_task(task)\n print(\"VM cloned.\")\n\n\ndef main(\n si,\n template,\n vm_name,\n datacenter_name,\n vm_folder,\n datastore_name,\n cluster_name,\n resource_pool,\n power_on,\n datastorecluster_name,\n opaque_network_name,\n):\n content = si.RetrieveContent()\n template = pchelper.get_obj(content, [vim.VirtualMachine], template)\n\n if template:\n clone_vm(\n content,\n template,\n vm_name,\n datacenter_name,\n vm_folder,\n datastore_name,\n cluster_name,\n resource_pool,\n power_on,\n datastorecluster_name,\n )\n if opaque_network_name:\n vm = pchelper.get_obj(content, [vim.VirtualMachine], vm_name)\n add_nic(si, vm, opaque_network_name)\n else:\n print(\"template not 
found\")\n","repo_name":"officialalikhani/vsphere-automation","sub_path":"vsphere_automation/core/clone_vm.py","file_name":"clone_vm.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"36057014476","text":"\ndef min_swaps(arr):\n arr_pos = [*enumerate(arr)]\n arr_pos.sort(key=lambda it:it[1])\n visited = {k:False for k in range(len(arr))}\n ans = 0\n for i in range(len(arr)):\n if visited[i] or arr_pos[i][0] == i:\n continue\n cycle_size = 0\n j = i\n while not visited[j]:\n visited[j] = True\n j = arr_pos[j][0]\n cycle_size += 1\n if cycle_size > 0:\n ans += cycle_size - 1\n return ans\n\n\nif __name__ == '__main__':\n assert min_swaps([3, 4, -2, 1, 2]) == 4","repo_name":"smartinsert/CodingProblem","sub_path":"minimum_swaps.py","file_name":"minimum_swaps.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"12527560270","text":"import sqlalchemy as sa\nfrom sqlalchemy.ext.declarative import declared_attr\nfrom crud_components.utils.validators.uid import uid_str, parse_uid\n\n\nclass IdMixinWithSequence:\n pass\n\n\ndef id_with_sequence(sequence):\n inherits = IdMixinWithSequence if sequence else object\n\n class IdWithSequence(inherits):\n # pylint: disable=no-self-argument,method-hidden\n @declared_attr.cascading\n def id(cls):\n info = {\n 'ordering': 0,\n 'exposed_name': 'id',\n 'display_name': 'ID',\n 'type': 'integer',\n 'orderable': False,\n 'searchable': False,\n 'nullable': False,\n 'editable': False,\n 'localizable': False,\n 'unique': True,\n 'visible': False,\n 'generated': True,\n 'implicit': True,\n 'summary': True,\n 'quick_search': False,\n }\n\n from .uid_mixin import UidMixin\n if issubclass(cls, UidMixin): # Ugly, but with declared_attr.cascading we cannot override \"id\" in subclasses\n # This works but it interferes with references: it will change exposed_as to e.g. organization.uid\n # info['exposed_as'] = 'uid'\n info['exposed_as'] = lambda s, f: uid_str(prefix=f.extras['uid_prefix'], serial_id=getattr(s, f.internal_name), version=None)\n info['unexposed_as'] = lambda s, f, v: parse_uid(v, prefix=f.extras['uid_prefix']).serial_id if v else None\n info['exposed_name'] = 'uid'\n info['type'] = 'uid'\n info['uid_prefix'] = getattr(cls, 'UID_PREFIX', None)\n\n for base in cls.__mro__[1:-1]:\n if getattr(base, '__table__', None) is not None:\n col_type = sa.ForeignKey(base.id)\n col_kwargs = dict()\n break\n else:\n col_type = sa.Integer\n col_kwargs = dict(autoincrement=True)\n col_args = getattr(cls, '__id_args__', tuple())\n col_kwargs.update(getattr(cls, '__id_kwargs__', dict()))\n\n if sequence:\n col = sa.Column(col_type, *col_args, sequence, primary_key=True, info=info, **col_kwargs)\n else:\n col = sa.Column(col_type, *col_args, primary_key=True, info=info, **col_kwargs)\n # This makes sure it's created first because TranslatableMixin assumes the order of the primary keys\n col._creation_order = -1\n return col\n\n @classmethod\n def find(cls, identifier):\n if identifier is None:\n return None\n pkey_value = int(identifier)\n return cls.query.get(pkey_value)\n return IdWithSequence\n\n\nclass IdMixin(id_with_sequence(None)):\n pass\n","repo_name":"interactivelife/crud_components","sub_path":"crud_components/database/mixins/id_mixin.py","file_name":"id_mixin.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"20521105160","text":"from Stack import Stack\n\n\ndef sort_stack(stack):\n if stack.isEmpty():\n return stack\n\n out = Stack()\n\n while not stack.isEmpty():\n temp = stack.pop()\n while not out.isEmpty() and out.peek() > temp:\n stack.push(out.pop())\n out.push(temp)\n\n return out\n\n\narr = [1, 4, 10, 5, 6]\nstack = Stack()\nfor n in arr:\n stack.push(n)\nsorted_stack = sort_stack(stack)\nout = []\nwhile not sorted_stack.isEmpty():\n x = sorted_stack.pop()\n out.append(x)\nprint(out)\n","repo_name":"YeskendirK/CtCI-6th","sub_path":"Chapter 3. Stacks and Queues/sort_stack.py","file_name":"sort_stack.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"74819730454","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='listings'),\n path('', views.listing, name='listing'),\n path('search/', views.search, name='search'),\n path('vendors/', views.vendors, name='vendors'),\n path('categories/', views.categories, name='categories'),\n path('categories/', views.category, name='category'),\n\n path('vendors/', views.vendor, name=\"vendor\"),\n path('tag//', views.index, name='listing_with_tag'),\n]\n ","repo_name":"rimlol/edtech_market","sub_path":"listings/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"19447781637","text":"\"\"\"\r\n1-Aug-2020\r\nThis code was written by Ziv Ronen to help plotting Bitlet paper graphs.\r\nThis is an ad hoc code for displaying specific requested graphs\r\n\"\"\"\r\n\r\nimport math\r\nimport re\r\nimport enum\r\nimport numbers\r\nimport itertools\r\nimport argparse\r\nimport collections\r\nimport typing\r\n\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.colors as colors\r\nimport matplotlib.text as text\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport scipy.interpolate\r\n\r\nimport PySimpleGUI as sg\r\n\r\nBIG_NUMBER = 2132039\r\n\r\n\r\nclass BoundaryFit(enum.Enum):\r\n NONE = False\r\n MIN_FIT = 1\r\n MAX_FIT = 2\r\n\r\n\r\nclass LineInfo(typing.NamedTuple):\r\n label_location: typing.Tuple[float, float]\r\n line_locations: typing.Sequence[typing.Tuple[float, float]]\r\n\r\n\r\ndef floats_or_axis(str_value):\r\n str_value = str_value.lower()\r\n if str_value == 'x' or str_value == 'y':\r\n return str_value\r\n\r\n return [float(float_str) for float_str in str_value.split(',')]\r\n\r\n\r\nclass Caller:\r\n def __init__(self, function, parameters_mapping):\r\n self._function = function\r\n self._parameters_mapping = parameters_mapping\r\n\r\n def get_const_arguments(self):\r\n return {\r\n parameter: self._parameters_mapping[parameter]\r\n for parameter in self._function.__code__.co_varnames\r\n if isinstance(self._parameters_mapping[parameter], numbers.Number)\r\n }\r\n\r\n def get_all_arguments(self):\r\n return {\r\n parameter: self._parameters_mapping[parameter]\r\n for parameter in self._function.__code__.co_varnames\r\n }\r\n\r\n def __call__(self, **kwargs):\r\n arguments_mapping = {\r\n parameter: (value if value not in kwargs else kwargs[value])\r\n for parameter, value in self.get_all_arguments().items()\r\n }\r\n return self._function(**arguments_mapping)\r\n\r\n def build(self, value_function):\r\n if value_function is None:\r\n return None\r\n return type(self)(build_function(value_function), self.get_all_arguments())\r\n\r\n def get_label(self, keyword):\r\n for key, value in self._parameters_mapping.items():\r\n if value == keyword:\r\n return key\r\n\r\n raise KeyError('keyword is not mapped')\r\n\r\n\r\nclass LinesCalculator:\r\n\r\n @staticmethod\r\n def _get_forced_fit(mapping, x_key, y_key, line_value, sensitivity):\r\n value = mapping[x_key][y_key]\r\n if abs(value - line_value) / (max(abs(value), abs(line_value)) + 1) <= sensitivity:\r\n # The value is actually correct\r\n return BoundaryFit.NONE\r\n\r\n if y_key == min(mapping[x_key].keys()):\r\n return BoundaryFit.MIN_FIT\r\n\r\n if y_key == max(mapping[x_key].keys()):\r\n return BoundaryFit.MAX_FIT\r\n\r\n return BoundaryFit.NONE\r\n\r\n @classmethod\r\n def _find_closest_to_line(cls, mapping, line_value, base_value_function=None, is_vertical=False):\r\n def distance_function(x, y, base_z):\r\n function_value = base_z if base_value_function is None else float(base_value_function(x=x, y=y))\r\n return abs(function_value - float(line_value))\r\n\r\n if not is_vertical:\r\n actual_mapping = mapping\r\n else:\r\n all_y_values = set(itertools.chain.from_iterable(\r\n [y_key for y_key in y_values.keys()] for y_values in mapping.values()\r\n ))\r\n actual_mapping = {\r\n y_key: {x_key: mapping[x_key][y_key] for x_key, y_values in mapping.items() if y_key in y_values}\r\n for y_key in all_y_values\r\n }\r\n\r\n line_locations = []\r\n if not is_vertical:\r\n for x_key, y_values in actual_mapping.items():\r\n y_key, _ = 
min(y_values.items(), key=lambda y: distance_function(x_key, y[0], y[1]))\r\n line_locations.append((x_key, y_key))\r\n\r\n else:\r\n for y_key, x_values in actual_mapping.items():\r\n x_key, _ = min(x_values.items(), key=lambda x: distance_function(x[0], y_key, x[1]))\r\n line_locations.append((x_key, y_key))\r\n\r\n return line_locations\r\n\r\n @classmethod\r\n def _fix_line(cls, line_locations, line_value, mapping, sensitivity):\r\n fixed_line_locations = []\r\n line_location_ex = zip(line_locations, itertools.chain(line_locations[1:], [line_locations[-1]]))\r\n for (x_key, y_key), (_, next_y_key) in line_location_ex:\r\n if y_key == next_y_key:\r\n forced_fit_type = cls._get_forced_fit(mapping, x_key, y_key, line_value, sensitivity)\r\n if forced_fit_type is not BoundaryFit.NONE:\r\n if len(fixed_line_locations) != 0 and not isinstance(fixed_line_locations[-1][1], BoundaryFit):\r\n fixed_line_locations.append((x_key, forced_fit_type))\r\n\r\n continue\r\n\r\n fixed_line_locations.append((x_key, y_key))\r\n\r\n return fixed_line_locations\r\n\r\n @classmethod\r\n def _find_label_location(cls, fixed_locations, is_vertical=False):\r\n if is_vertical:\r\n line_drawn_ys = set(y for _, y in fixed_locations)\r\n label_y_value = list(sorted(line_drawn_ys))[int(0.54 * len(line_drawn_ys))]\r\n label_x_value = next(x for x, y in fixed_locations if y == label_y_value)\r\n\r\n else:\r\n line_drawn_xs = set(x for x, _ in fixed_locations)\r\n label_x_value = list(sorted(line_drawn_xs))[int(0.5 * len(line_drawn_xs))]\r\n label_y_value = next(y for x, y in fixed_locations if x == label_x_value)\r\n\r\n return label_x_value, label_y_value\r\n\r\n @classmethod\r\n def run(cls, mapping, line_value, sensitivity=0.001, value_function=None, is_vertical=False):\r\n line_locations = cls._find_closest_to_line(mapping, line_value, value_function, is_vertical)\r\n fixed_locations = cls._fix_line(line_locations, line_value, mapping, sensitivity)\r\n if len(fixed_locations) < 2:\r\n return None\r\n\r\n label_x, label_y = cls._find_label_location(fixed_locations, is_vertical)\r\n return LineInfo((label_x, label_y), fixed_locations)\r\n\r\n\r\ndef fix_axis(axis, unscaled_values, scaled_values, set_ticks, set_labels):\r\n relevant_values = [\r\n scaled for value, scaled in zip(unscaled_values, scaled_values)\r\n if value.is_integer()\r\n ]\r\n\r\n min_tick, max_tick = axis.get_ticklocs()[0], axis.get_ticklocs()[-1]\r\n delta_tick = (max_tick - min_tick) / (len(relevant_values) - 1)\r\n set_ticks([min_tick + i * delta_tick for i in range(len(relevant_values))])\r\n set_labels([text.Text(0, 0, format_float(scaled)) for scaled in relevant_values])\r\n\r\n\r\ndef format_float(value):\r\n base_value = round(float(value), 1)\r\n return str(int(base_value) if base_value.is_integer() else base_value)\r\n\r\n\r\nclass LocationFixer:\r\n def __init__(self, ax, x_mapping, y_mapping):\r\n x_labels_position = ax.xaxis.get_ticklocs()\r\n self._x_tick_min = min(x_labels_position)\r\n self._x_axis_size = max(x_labels_position) - self._x_tick_min\r\n\r\n y_labels_position = ax.yaxis.get_ticklocs()\r\n self._y_tick_min = min(y_labels_position)\r\n self._y_tick_max = max(y_labels_position)\r\n self._y_axis_size = self._y_tick_max - self._y_tick_min\r\n\r\n self._min_x_value = min(x_mapping.keys())\r\n self._x_value_range = max(x_mapping.keys()) - self._min_x_value\r\n self._min_y_value = min(y_mapping.keys())\r\n self._y_value_range = max(y_mapping.keys()) - self._min_y_value\r\n\r\n self._x_reverse_mapping = {value: key for key, value 
in x_mapping.items()}\r\n self._y_reverse_mapping = {value: key for key, value in y_mapping.items()}\r\n\r\n def get_location(self, scaled_x, scaled_y):\r\n x_location = self._x_tick_min + self.get_x(self._x_reverse_mapping[scaled_x] - self._min_x_value)\r\n\r\n if scaled_y is BoundaryFit.MIN_FIT:\r\n y_location = 0\r\n elif scaled_y is BoundaryFit.MAX_FIT:\r\n y_location = self._y_tick_max\r\n else:\r\n y_location = self._y_tick_min + self.get_y(self._y_reverse_mapping[scaled_y] - self._min_y_value)\r\n\r\n return x_location, self._y_tick_max - y_location\r\n\r\n def get_x(self, unscaled_x):\r\n normalized_x_location = unscaled_x / self._x_value_range\r\n return normalized_x_location * self._x_axis_size\r\n\r\n def get_y(self, unscaled_y):\r\n normalized_y_location = unscaled_y / self._y_value_range\r\n return normalized_y_location * self._y_axis_size\r\n\r\n\r\ndef iter_targets(caller, target_description):\r\n target = caller.build(target_description)()\r\n if isinstance(target, numbers.Number):\r\n return [(format_float(target), target)]\r\n\r\n if isinstance(target, typing.Mapping):\r\n return target.items()\r\n\r\n if isinstance(target, typing.Iterable):\r\n return ((format_float(val), val) for val in target)\r\n\r\n raise ValueError('target description can not be used for lines [got: {}]'.format(target_description))\r\n\r\n\r\ndef add_line(data, fixer, line_conf, target, value_function, label_text, is_vertical):\r\n line_info = LinesCalculator.run(data, target, value_function=value_function, is_vertical=is_vertical)\r\n if line_info is None:\r\n return\r\n\r\n line_x, line_y = list(zip(*(fixer.get_location(x, y) for x, y in line_info.line_locations)))\r\n if is_vertical:\r\n plt.plot(line_x, line_y, **line_conf)\r\n else:\r\n x_new = np.linspace(min(line_x), max(line_x), 300)\r\n smoothed_y = scipy.interpolate.make_interp_spline(line_x, line_y)(x_new)\r\n plt.plot(line_x, line_y, **line_conf)\r\n\r\n label_position_x, label_position_y = fixer.get_location(*line_info.label_location)\r\n plt.text(\r\n # TODO: the added values might required changes between the graphs for maximum readability\r\n label_position_x + fixer.get_x(0.01),\r\n label_position_y + fixer.get_y(-0.05),\r\n label_text,\r\n color=line_conf.get('color', 'black'),\r\n )\r\n\r\n\r\ndef add_lines(ax, caller, data, lines, x_mapping, y_mapping):\r\n fixer = LocationFixer(ax, x_mapping, y_mapping)\r\n for line_conf, value_function_description, is_vertical, targets in lines:\r\n value_function = caller.build(value_function_description)\r\n for target_description in targets:\r\n for label_text, target in iter_targets(caller, target_description):\r\n add_line(data, fixer, line_conf, target, value_function, label_text, is_vertical)\r\n\r\n\r\ndef plot(x_values, y_values, caller, lines, scale_x=2, scale_y=2):\r\n x_mapping = {x: scale_x ** x for x in x_values}\r\n y_mapping = {y: scale_y ** y for y in reversed(y_values)}\r\n\r\n data = collections.OrderedDict(\r\n (x, collections.OrderedDict((y, caller(x=x, y=y)) for y in y_mapping.values()))\r\n for x in x_mapping.values()\r\n )\r\n\r\n df = pd.DataFrame(data)\r\n fig, ax = plt.subplots(figsize=(12, 7))\r\n\r\n title_consts_str = ', '.join(\r\n '{}={}'.format(key, format_float(value))\r\n for key, value in caller.get_const_arguments().items()\r\n )\r\n\r\n windows_consts_str = '__'.join(\r\n '{}_{}'.format(key.lower(), format_float(value))\r\n for key, value in caller.get_const_arguments().items()\r\n )\r\n\r\n label_x = caller.get_label('x')\r\n label_y = 
caller.get_label('y')\r\n fig.canvas.set_window_title('z__{}_x__{}_y__{}'.format(label_x, label_y, windows_consts_str))\r\n plt.title('{}'.format(title_consts_str), fontsize=18)\r\n\r\n norm = colors.LogNorm()\r\n sns.heatmap(df, cmap=plt.get_cmap('RdYlGn'), linewidths=0, ax=ax, norm=norm)\r\n\r\n fix_axis(ax.xaxis, x_mapping.keys(), x_mapping.values(), ax.set_xticks, ax.set_xticklabels)\r\n fix_axis(ax.yaxis, y_mapping.keys(), y_mapping.values(), ax.set_yticks, ax.set_yticklabels)\r\n\r\n plt.xlabel(label_x)\r\n plt.ylabel(label_y)\r\n # Add lines\r\n add_lines(ax, caller, data, lines, x_mapping, y_mapping)\r\n\r\n\r\ndef build_function(function_expression):\r\n variables_pattern = r'<([A-Za-z]\\w*)>'\r\n parameters_names = ', '.join(set(re.findall(variables_pattern, function_expression)))\r\n function_expression_fixed = re.sub(variables_pattern, r'\\1', function_expression)\r\n lambda_expression = 'lambda {}: {}'.format(parameters_names, function_expression_fixed)\r\n return eval(lambda_expression)\r\n\r\n\r\ndef parse_arguments(argv=None):\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('function', help='The function to parse')\r\n\r\n parser.add_argument('x_max', type=int, help='max x value')\r\n parser.add_argument('y_max', type=int, help='max y value')\r\n parser.add_argument('--x-min', type=int, default=0, help='min x value')\r\n parser.add_argument('--y-min', type=int, default=0, help='min y value')\r\n parser.add_argument(\r\n '--lines', nargs='+', action='append', type=str, default=[],\r\n help='Lines, each should be given as: --lines