diff --git "a/302.jsonl" "b/302.jsonl" new file mode 100644--- /dev/null +++ "b/302.jsonl" @@ -0,0 +1,723 @@ +{"seq_id":"532863556","text":"\n#setStkCSVFile.py\n\n\n\"\"\"Very basic program with no prompts or GUI:\n 1. Creates new CSV file or accesses existing file\n for specified list of stock symbols and\n date ranges. (uses Yahoo Finance for data)\n 2. Also allows for choosing between updating existing CSV file\n or creating a new CSV file\n 3. Weekly data as of close Tuesday is used to sync witg COT data\n 4. counts # of rows in file\n\"\"\"\nimport pandas.io.data as pullData\nimport datetime\nimport time\n\n#############################################################\nclass setCSVFile():\n def __init__(self,symbol):\n # self.symbolList = symbolList\n self.symbol = symbol\n # self.validSymbols = []\n # self.invalidSymbols = []\n\n\n def accessSite(self,start,end):\n # print('self.symbol: ', self.symbol)\n self.start = start\n self.end = end\n try:\n self.timeSeries0 = pullData.DataReader(self.symbol, 'yahoo', self.start, self.end)\n print(\"***CSV file for {0} has just been created\".format(self.symbol.upper()))\n print()\n except:\n print()\n print(\"***ERROR***: {0} is not a valid symbol\".format(self.symbol.upper()))\n print()\n placeFiller = input(\"Hit Enter/Return to continue\")\n print()\n badSymbol = 'NO'\n return badSymbol\n\n def weekOrDay(self,freq):\n # self.timeSeries0 = self.timeSeries0.asfreq('W-TUE')\n self.timeSeries0 = self.timeSeries0.asfreq(freq)\n\n # print(\"Entered setStkCSVFile.py to use existing file\")\n #alternate way to retrieve data\n #self.timeSeries0 = pd.io.data.get_data_yahoo(symbol, self.start, self.end)\n ##for SP500 in above line only use '%5EGSPC' as symbol\n # self.createCSV\n\n def createCSV(self):\n print\n # self.timeSeries0.to_csv('{0} ohlc.csv'.format(self.symbol))\n self.timeSeries0.to_csv('../{0} ohlc.csv'.format(self.symbol))\n # self.dataFile = pullData.read_csv('{0} ohlc.csv'.format(self.symbol), index_col='Date',parse_dates=True)\n self.dataFile = pullData.read_csv('../{0} ohlc.csv'.format(self.symbol), index_col='Date', parse_dates=True)\n return self.dataFile\n\n def useCurrentCSV(self):\n self.dataFile = pullData.read_csv('{0} ohlc.csv'.format(self.symbol), index_col='Date',parse_dates=True)\n #print('self.dataFile print:', self.dataFile[2:3])\n print(\"Entered stxSetFile1b.py to create new file\")\n return self.dataFile\n\n def countRows(self,csv1):\n self.dayCounter = 0\n #print('csv1: ', csv1)\n for i in range(len(csv1)):\n # print(i)\n self.dayCounter +=1\n print('days in the file:',self.dayCounter)\n return self.dayCounter\n\n def populateYorN(self,symbolList,ID_NameKey,freq):\n print()\n print('Populate SQL Table for: ')\n # print('SS: ',self.symbol)\n for i in self.symbolList:\n print(i.upper())\n populateSQL = input(\"Enter 'y' for yes or anything else for 'no': \")\n print()\n\n if populateSQL == 'y':\n createOrExisting = input(\"Create new table('newyesnew') or update existing ('u')? \")\n if createOrExisting == 'newyesnew' or createOrExisting == 'u':\n import stkSQLFill1\n stkSQLFill1.main(self.symbolList,createOrExisting,ID_NameKey,freq)\n else:\n print()\n print(\"'{0}' is an incorrect entry. 
Try again\".format(createOrExisting))\n print()\n return False\n else:\n return\n\n#########################################################\n#########################################################\ndef main(symbol,choice1a,freq,startDate1,endDate1,ID_NameKey,actionSelected):\n # validSymbols = []\n # invalidSymbols = []\n valid = True\n # print(\"symbolList: \", symbol)\n # print(symbol,choice1a,freq,startDate1,endDate1,ID_NameKey,actionSelected)\n\n a = setCSVFile(symbol)\n # a = setCSVFile(symbolList,i)\n\n if choice1a == 'e' :\n csv1 = a.useCurrentCSV()\n # validSymbols.append(symbol)\n # return csv1\n\n if choice1a == 'n':\n # checker = True\n a2 = a.accessSite(startDate1,endDate1)\n if a2 == 'NO':\n # print(\"SKIP & MOVE ON AS {0} IS NOT A VALID SYMBOL\".format(symbol.upper()))\n print()\n # invalidSymbols.append(symbol)\n toReturn = ['Dummy',False]\n return toReturn\n else:\n if freq != 'd':\n a.weekOrDay(freq)\n csv1 = a.createCSV()\n # validSymbols.append(symbol)\n else:\n csv1 = a.createCSV()\n # validSymbols.append(symbol)\n\n # fileDays = a.countRows(csv1)\n # print(\"Valid: \",validSymbols)\n # print(\"Invalid: \",invalidSymbols)\n toReturn = [csv1,valid]\n return toReturn\n\n\n# # Following is for standalone testing (instead of main() being called by setStkList.py)\n# startDate = '20150101'\n# endDate = '20160301'\n#\n# #Frequency options are 1)'D' 2)'W-TUE' (or whichever day of week preferred) 3)'M 4)'A'\n# frequency = input('Enter Frequencyyyyyy: ').lower()\n#\n# if __name__ == '__main__': main('spy', 'n',frequency,startDate,endDate,1,'actionSelected')\n# # if __name__ == '__main__': main('gld', 'n',frequency,startDate,endDate,3,'actionSelected')\n# # if __name__ == '__main__': main('tlh', 'n',frequency,startDate,endDate,2,'actionSelected')\n# # # # if __name__ == '__main__': main('ief',frequency,startDate,endDate,2,'actionSelected')\n# # if __name__ == '__main__': main('uso', 'n',frequency,startDate,endDate,4,'actionSelected')\n","sub_path":"StkOverall/stkCSV.py","file_name":"stkCSV.py","file_ext":"py","file_size_in_byte":5869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"264372628","text":"# %%\nimport torch\nimport torch.nn.functional as F\nimport torchvision\nimport numpy as np\nimport pandas as pd\nimport random\nimport os\nimport copy\nimport PIL\nimport argparse\n\nfrom tqdm import tqdm\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import confusion_matrix\n\nfrom utils.config import base_path\nfrom utils import vars\nfrom utils import trainer\nfrom utils import metrics\nfrom utils import utils\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom models import covid_classifier\nfrom models import pneumonia_classifier\n\nfrom datasets import corda\n\nimport functools\n\n# %%\nlr = 1e-1\nseed = vars.seed\ndevice = torch.device('cuda:0')\nepochs = 1\nmetric = 'auc'\nmode = 'max'\n\n# %%\nparser = argparse.ArgumentParser()\nparser.add_argument('--epochs', type=int, default=100, help='num. 
of epochs (default 100)')\nparser.add_argument('--lr', type=float, default=1e-1, help='learning rate default (0.1)')\nparser.add_argument('--metric', type=str, default='auc', help='Metric for best model (default auc)')\nparser.add_argument('--mode', type=str, default='max', help='max or min (default max)')\nparser.add_argument('--arch', type=str, default='resnet18', help='encoder architecture (resnet18 or resnet50)')\nparser.add_argument('--pretrain', type=str, default='chestxray', help='pretrained (chestxray, rsna, none)')\nparser.add_argument('--train', type=str, default='corda', help='corda, corda+chest, corda+rsna, corda+cohen, cohen')\nargs = parser.parse_args()\n\nlr = args.lr\nepochs = args.epochs\nmetric = args.metric\nmode = args.mode\n\n# %%\ndevice = torch.device('cuda:0')\nutils.set_seed(seed)\n\nmodel_preprocessed = 'unprocessed'\nfeature_preprocessed = 'unprocessed'\npreprocessed = '(unprocessed)'\n\n# %%\ncorda_dataset = 'CORDA-dataset'\ncorda_version = f'CORDA-dataset-{vars.corda_version}'\ncorda_basepath = os.path.join(base_path, 'corda', corda_version, corda_dataset)\ncorda_df = pd.read_csv(os.path.join(corda_basepath, 'CORDA_fix.csv'))\ncorda_train_df, corda_test_df = train_test_split(corda_df, test_size=0.3, random_state=vars.seed, stratify=corda_df.covid)\ncorda_train_df, corda_val_df = train_test_split(corda_train_df, test_size=0.2, random_state=vars.seed, stratify=corda_train_df.covid)\n\n\n# %%\nrsna_dataset = 'rsna_bal_subset'\nrsna_basepath = os.path.join(base_path, rsna_dataset)\nrsna_df = pd.read_csv(os.path.join(rsna_basepath, 'stage_2_train_labels_subset.csv'))\nrsna_train_df = corda.preprocess_rsna_df(rsna_df)\nrsna_train_df, rsna_test_df = train_test_split(rsna_df, test_size=0.3, random_state=vars.seed, stratify=rsna_train_df.label)\nrsna_train_df, rsna_val_df = train_test_split(rsna_train_df, test_size=0.2, random_state=vars.seed, stratify=rsna_train_df.label)\n\n# %%\nchestxray_dataset = 'chest_xray'\nchestxray_basepath = os.path.join(base_path, chestxray_dataset)\nchestxray_train_df = pd.read_csv(os.path.join(chestxray_basepath, 'train_3_classes.csv'))\nchestxray_val_df = pd.read_csv(os.path.join(chestxray_basepath, 'val_3_classes.csv'))\nchestxray_test_df = pd.read_csv(os.path.join(chestxray_basepath, 'test_3_classes.csv'))\nchestxray_train_df = corda.preprocess_chest_df(chestxray_train_df)\nchestxray_val_df = corda.preprocess_chest_df(chestxray_val_df)\nchestxray_test_df = corda.preprocess_chest_df(chestxray_test_df)\n\n# %%\ncohen_dataset = 'cohen'\ncohen_basepath = os.path.join(base_path, cohen_dataset)\ncohen_train_df = pd.read_csv(os.path.join(cohen_basepath, 'train.csv'))\ncohen_test_df = pd.read_csv(os.path.join(cohen_basepath, 'test.csv'))\ncohen_train_df = corda.preprocess_cohen_df(cohen_train_df)\ncohen_test_df = corda.preprocess_cohen_df(cohen_test_df)\ncohen_train_df, cohen_val_df = train_test_split(cohen_train_df, test_size=0.2, random_state=vars.seed, stratify=cohen_train_df.covid)\n\n# %%\ndef balance_corda_with_other(corda_df, other_df):\n covid1_size = len(corda_df[corda_df.covid == 1])\n covid0_size = len(corda_df[corda_df.covid == 0])\n delta = covid1_size - covid0_size\n corda_df = pd.concat((corda_df, other_df.sample(n=delta, random_state=vars.seed).copy()))\n return corda_df\n\n# %% MEAN & STD\nencoder_df = pd.concat((corda_train_df, corda_val_df))\n\nif args.pretrain == 'chestxray':\n encoder_df = pd.concat((encoder_df, chestxray_train_df, chestxray_val_df))\n\nelif args.pretrain == 'rsna':\n encoder_df = pd.concat((encoder_df, 
rsna_train_df))\n\nelif args.pretrain == 'none':\n pass\n\nelse:\n print(f'Unkown pretrain value: {args.pretrain}')\n exit(1)\n\nstats_transforms = torchvision.transforms.Compose([\n torchvision.transforms.Resize(256),\n torchvision.transforms.CenterCrop(224),\n torchvision.transforms.ToTensor(),\n])\n\nstats_dataset = corda.CORDA(\n encoder_df,\n corda_base_path=corda_basepath,\n rsna_base_path=rsna_basepath,\n chest_base_path=chestxray_basepath,\n transform=stats_transforms\n)\n\nstats_dataloader = torch.utils.data.DataLoader(\n stats_dataset, batch_size=10,\n shuffle=False, num_workers=10,\n worker_init_fn=lambda id: utils.set_seed(seed),\n pin_memory=True\n)\n\nmean, std = utils.get_mean_and_std(stats_dataloader)\nprint(f'Mean & std for corda+{args.pretrain}:', mean, std)\n\n\n\n# CORDA ONLY (balance majority class)\ntrain_df = corda_train_df\nval_df = corda_val_df\ntest_df = corda_test_df\n\nif args.train == 'corda':\n train_df = pd.concat((\n corda_train_df[corda_train_df.covid == 0].sample(n=84, random_state=42),\n corda_train_df[corda_train_df.covid == 1].sample(n=84, random_state=42)\n ))\n\nelif args.train == 'corda+chest':\n train_df = balance_corda_with_other(corda_train_df, chestxray_train_df)\n val_df = balance_corda_with_other(corda_val_df, chestxray_val_df)\n test_df = balance_corda_with_other(corda_test_df, chestxray_test_df)\n\nelif args.train == 'corda+rsna':\n train_df = balance_corda_with_other(corda_train_df, rsna_train_df)\n val_df = balance_corda_with_other(corda_val_df, rsna_val_df)\n test_df = balance_corda_with_other(corda_test_df, rsna_test_df)\n\nelif args.train == 'corda+cohen':\n train_df = pd.concat((train_df, cohen_train_df))\n val_df = pd.concat((val_df, cohen_val_df))\n test_df = pd.concat((test_df, cohen_test_df))\n\n noncovid_size = len(train_df[train_df.covid == 0])\n train_df = pd.concat((\n train_df[train_df.covid == 0].sample(n=noncovid_size, random_state=42),\n train_df[train_df.covid == 1].sample(n=noncovid_size, random_state=42)\n ))\n\nelif args.train == 'cohen':\n train_df = cohen_train_df\n val_df = cohen_val_df\n test_df = cohen_test_df\n\nelse:\n print(f'Unknown train mode: {args.train}')\n exit(1)\n\n\n\n# %%\ntrain_transforms = torchvision.transforms.Compose([\n torchvision.transforms.Resize(256),\n torchvision.transforms.RandomHorizontalFlip(p=0.2),\n torchvision.transforms.RandomAffine((-1, 1), translate=(0, 0.1), scale=(1, 1.1)),\n torchvision.transforms.CenterCrop(224),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(mean, std)\n])\n\ntransforms = torchvision.transforms.Compose([\n torchvision.transforms.Resize(256),\n torchvision.transforms.CenterCrop(224),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(mean, std),\n])\n\n\n# %%\ntrain_dataset = corda.CORDA(\n train_df,\n corda_base_path=corda_basepath,\n chest_base_path=chestxray_basepath,\n rsna_base_path=rsna_basepath,\n cohen_base_path=cohen_basepath,\n transform=train_transforms\n)\n\nval_dataset = corda.CORDA(\n val_df,\n corda_base_path=corda_basepath,\n chest_base_path=chestxray_basepath,\n rsna_base_path=rsna_basepath,\n cohen_base_path=cohen_basepath,\n transform=transforms\n)\n\ntest_dataset = corda.CORDA(\n test_df,\n corda_base_path=corda_basepath,\n chest_base_path=chestxray_basepath,\n rsna_base_path=rsna_basepath,\n cohen_base_path=cohen_basepath,\n transform=transforms\n)\n\ntrain_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=4,\n shuffle=True, num_workers=0,\n worker_init_fn=lambda id: 
utils.set_seed(seed),\n pin_memory=True\n)\n\nval_dataloader = torch.utils.data.DataLoader(\n val_dataset, batch_size=10,\n shuffle=False, num_workers=4,\n worker_init_fn=lambda id: utils.set_seed(seed+id),\n pin_memory=True\n)\n\ntest_dataloader = torch.utils.data.DataLoader(\n test_dataset, batch_size=10,\n shuffle=False, num_workers=4,\n worker_init_fn=lambda id: utils.set_seed(seed+id)\n)\n\n# %%\nif args.arch not in ['resnet18', 'resnet50']:\n print(f'Unkown arch {args.arch}')\n exit(0)\n\nname = f'{args.arch}-unprocessed/{args.pretrain}/{args.train}'\nutils.ensure_dir(f'logs/{vars.corda_version}/{name}')\nutils.ensure_dir(f'models/{vars.corda_version}/{args.arch}-unprocessed/{args.pretrain}')\n\ntrain_df.to_csv(f'logs/{vars.corda_version}/{name}/train.csv', index=False)\nval_df.to_csv(f'logs/{vars.corda_version}/{name}/val.csv', index=False)\ntest_df.to_csv(f'logs/{vars.corda_version}/{name}/test.csv', index=False)\n\nwith open(f'logs/{vars.corda_version}/{name}/stats.txt', 'w') as f:\n f.write(f'Mean, std: {mean}, {std}\\n')\n f.write(f'LR: {args.lr}, epochs: {args.epochs}\\n')\n f.write(f'CORDA dataset size: {len(corda_df)} \\n\\n')\n\n train_cov_size = [\n len(train_df[train_df.covid == 0]),\n len(train_df[train_df.covid == 1])\n ]\n train_rx_size = [\n len(train_df[train_df.rx==0]),\n len(train_df[train_df.rx==1])\n ]\n\n val_cov_size = [\n len(val_df[val_df.covid==0]),\n len(val_df[val_df.covid==1])\n ]\n val_rx_size = [\n len(val_df[val_df.rx==0]),\n len(val_df[val_df.rx==1])\n ]\n\n test_cov_size = [\n len(test_df[test_df.covid==0]),\n len(test_df[test_df.covid==1])\n ]\n test_rx_size = [\n len(test_df[test_df.rx==0]),\n len(test_df[test_df.rx==1])\n ]\n\n f.write(f'Train dataset size: {len(train_df)}, classes: {train_cov_size} (RX: {train_rx_size})\\n')\n f.write(f'Val dataset size: {len(val_df)}, classes: {val_cov_size} (RX: {val_rx_size})\\n')\n f.write(f'Test dataset size: {len(test_df)}, classes: {test_cov_size} (RX: {test_rx_size})\\n')\n\n# MODEL CREATION\n# %%\nfeature_extractor = None\npretrain_path = None\n\npretrain_path = args.pretrain\nif args.pretrain == 'chestxray':\n pretrain_path = '3-classes'\n\nfeature_extractor_path = f'models/{vars.corda_version}/{args.arch}-pneumonia-classifier-s42-{pretrain_path}-unprocessed.pt'\n\nif args.pretrain == 'chestxray' and args.arch == 'resnet18':\n feature_extractor = pneumonia_classifier.PneumoniaClassifierChest(pretrained=True)\n\nelif args.pretrain == 'chestxray' and args.arch == 'resnet50':\n feature_extractor = pneumonia_classifier.PneumoniaClassifierChest50(pretrained=True)\n\nelif args.pretrain == 'rsna' and args.arch == 'resnet18':\n feature_extractor = pneumonia_classifier.PneumoniaClassifierRSNA(pretrained=True)\n\nelif args.pretrain == 'rsna' and args.arch == 'resnet50':\n feature_extractor = pneumonia_classifier.PneumoniaClassifierRSNA50(pretrained=True)\n\nif feature_extractor is not None:\n checkpoint = torch.load(\n feature_extractor_path,\n map_location={'cuda:0': 'cpu'}\n )\n feature_extractor.load_state_dict(checkpoint['model'])\n print(f'Loaded unprocessed feature extractor from epoch {checkpoint[\"epoch\"]}')\n feature_extractor = feature_extractor.encoder\n\nmodel = None\n\nif args.arch == 'resnet18':\n model = covid_classifier.CovidClassifier(\n encoder=feature_extractor,\n pretrained=False,\n freeze_conv=False\n ).to(device)\nelif args.arch == 'resnet50':\n model = covid_classifier.CovidClassifier50(\n encoder=feature_extractor,\n pretrained=False,\n freeze_conv=False\n ).to(device)\n\n#model = 
covid_classifier.LeNet1024NoPoolingDeep().to(device)\n\nprint(f'Using lr {lr}')\n\n# TRAINING\n# %%\ntracked_metrics = [\n metrics.Accuracy(),\n metrics.RocAuc(),\n metrics.FScore()\n]\n\ndef focal_loss(output, target, gamma=2., weight=None):\n bce = F.binary_cross_entropy(output, target, reduction='none', weight=weight)\n pt = target*output + (1-target)*(1-output)\n return (torch.pow((1-pt), gamma) * bce).mean()\n\ncriterion = focal_loss\noptimizer = torch.optim.SGD(model.parameters(), lr=lr, weight_decay=1e-3)\nlr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=15, verbose=True)\n\nbest_model = trainer.fit(\n model=model, train_dataloader=train_dataloader,\n val_dataloader=val_dataloader, test_dataloader=test_dataloader,\n test_every=10, criterion=criterion,\n optimizer=optimizer, scheduler=lr_scheduler,\n metrics=tracked_metrics, n_epochs=args.epochs, name=name,\n metric_choice=args.metric, mode=args.mode, device=device,\n checkpoint_params={'corda-version': vars.corda_version}\n)\n\nprint(f'Best model: ')\ntest_logs, test_cm = trainer.test(\n model=best_model, test_dataloader=test_dataloader, weight=None,\n criterion=criterion, metrics=tracked_metrics, device=device\n)\nax = sns.heatmap(test_cm.get(normalized=True), annot=True, fmt=\".2f\")\nax.set_title(f'{args.train} Best {preprocessed}')\nplt.xlabel('predicted')\nplt.ylabel('ground')\nhm = ax.get_figure()\nhm.savefig(f'logs/{vars.corda_version}/{name}/best.png')\nhm.clf()\n\n# %%\nprint(f'Final model:')\ntest_logs, test_cm = trainer.test(\n model=model, test_dataloader=test_dataloader, weight=None,\n criterion=criterion, metrics=tracked_metrics, device=device\n)\nax = sns.heatmap(test_cm.get(normalized=True), annot=True, fmt=\".2f\")\nax.set_title(f'{args.train.upper()} {preprocessed}')\nplt.xlabel('predicted')\nplt.ylabel('ground')\nhm = ax.get_figure()\nhm.savefig(f'logs/{vars.corda_version}/{name}/final.png')\nhm.clf()\n\n\nwith open(f'logs/{vars.corda_version}/{name}/stats.txt', 'a') as f:\n f.write(trainer.summarize_metrics(test_logs) + '\\n')\n\n\n\n## EVALUATION\nprint(f'Training finished, benchmarking model..')\n\ncorda_test_dataset = corda.CORDA(\n corda_test_df,\n corda_base_path=corda_basepath,\n transform=transforms\n)\n\ncorda_rxpos_dataset = corda.CORDA(\n corda_test_df[corda_test_df.rx == 1],\n corda_base_path=corda_basepath,\n transform=transforms\n)\n\ncorda_rxneg_dataset = corda.CORDA(\n corda_test_df[corda_test_df.rx == 0],\n corda_base_path=corda_basepath,\n transform=transforms\n)\n\nrsna_test_dataset = corda.CORDA(\n rsna_test_df,\n rsna_base_path=rsna_basepath,\n transform=transforms\n)\n\nrxpos_dataset = corda.CORDA(\n test_df[test_df.rx == 1],\n corda_base_path=corda_basepath,\n chest_base_path=chestxray_basepath,\n rsna_base_path=rsna_basepath,\n cohen_base_path=cohen_basepath,\n transform=transforms\n)\n\nchest_test_dataset = corda.CORDA(\n chestxray_test_df,\n chest_base_path=chestxray_basepath,\n transform=transforms\n)\n\ncohen_all_dataset = corda.CORDA(\n pd.concat((cohen_train_df, cohen_val_df, cohen_test_df)),\n cohen_base_path=cohen_basepath,\n transform=transforms\n)\n\ncohen_test_dataset = corda.CORDA(\n cohen_test_df,\n cohen_base_path=cohen_basepath,\n transform=transforms\n)\n\n\nbenchmark_data = {\n 'arch': [], 'pretrain': [], 'train': [],\n 'test': [], 'accuracy': [], 'auc': [],\n 'sensitivity': [], 'specificity': [], 'fscore': [],\n 'ba': [], 'missrate': [], 'dor': []\n}\n\ndef benchmark_dataset(dataset, title, fname, testname, xlabels, ylabels=None):\n global 
benchmark_data\n print(f'Benchmarking {title}.. ')\n\n dataloader = torch.utils.data.DataLoader(\n dataset, batch_size=10,\n shuffle=False, num_workers=4,\n )\n\n tracked_metrics = [\n metrics.Accuracy(),\n metrics.RocAuc(),\n metrics.FScore()\n ]\n\n logs, cm = trainer.test(\n model=model, test_dataloader=dataloader,\n criterion=criterion, metrics=tracked_metrics, device=device\n )\n\n with open(f'logs/{vars.corda_version}/{name}/{fname}-metric.txt', 'w') as f:\n f.write(f'{fname}: ' + trainer.summarize_metrics(logs) + '\\n')\n\n ax = sns.heatmap(\n cm.get(normalized=True), annot=True, fmt=\".2f\",\n xticklabels=xlabels, yticklabels=ylabels or xlabels,\n vmin=0., vmax=1.\n )\n ax.set_title(title)\n plt.xlabel('predicted')\n plt.ylabel('ground')\n hm = ax.get_figure()\n hm.savefig(f'logs/{vars.corda_version}/{name}/{fname}.png')\n hm.clf()\n\n fpr, tpr, thresholds = tracked_metrics[1].get_curve()\n auc = tracked_metrics[1].get()\n f = plt.figure()\n plt.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (auc = {auc:.2f})')\n plt.title(f'{title} ROC')\n plt.legend(loc='lower right')\n plt.savefig(f'logs/{vars.corda_version}/{name}/{fname}-roc.png')\n plt.clf()\n plt.cla()\n plt.close()\n\n specificity, fpr, fnr, sensitivity = cm.get(normalized=True).ravel()\n dor = (sensitivity*specificity)/((1-sensitivity)*(1-specificity))\n fscore = tracked_metrics[2].get()\n ba = (sensitivity+specificity)/2.\n\n data = {\n 'arch': args.arch, 'pretrain': args.pretrain, 'train': args.train.upper(),\n 'test': testname, 'accuracy': tracked_metrics[0].get(), 'auc': auc,\n 'sensitivity': sensitivity, 'specificity': specificity, 'fscore': fscore,\n 'ba': ba, 'missrate': fnr, 'dor': dor\n }\n\n for k,v in data.items():\n benchmark_data[k].append(v)\n\n\nbenchmark_dataset(corda_test_dataset, f'CORDA {preprocessed}', 'corda', 'CORDA', ['covid-', 'covid+'])\nbenchmark_dataset(corda_rxpos_dataset, f'CORDA RX+ {preprocessed}', 'corda-rx+', 'CORDA RX+', ['covid-', 'covid+'])\nbenchmark_dataset(corda_rxneg_dataset, f'CORDA RX- {preprocessed}', 'corda-rx-', 'CORDA RX-', ['covid-', 'covid+'])\nbenchmark_dataset(rxpos_dataset, f'Test {args.train.upper()} RX+ {preprocessed}', f'test-{args.train}-rx+', f'{args.train.upper()} RX+', ['covid-', 'covid+'])\nbenchmark_dataset(cohen_all_dataset, f'Cohen (All) {preprocessed}', 'cohen-all', 'Cohen (All)', ['covid-', 'covid+'])\nbenchmark_dataset(cohen_test_dataset, f'Cohen (Test) {preprocessed}', 'cohen-test', 'Cohen', ['covid-', 'covid+'])\n\nrsna_test_df['covid'] = rsna_test_df['rx']\nbenchmark_dataset(rsna_test_dataset, f'RSNA {preprocessed}', 'rsna', 'RSNA', ['covid-', 'covid+'], ['rx-', 'rx+'])\n\nchestxray_test_df['covid'] = chestxray_test_df['rx']\nbenchmark_dataset(chest_test_dataset, f'ChestXRay {preprocessed}', 'chestxray', 'ChestXRay', ['covid-', 'covid+'], ['rx-', 'rx+'])\n\npd.DataFrame(benchmark_data).to_csv(f'logs/{vars.corda_version}/{name}/benchmark.csv')\n","sub_path":"train-covid-classifier-unprocessed.py","file_name":"train-covid-classifier-unprocessed.py","file_ext":"py","file_size_in_byte":18342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"54842033","text":"from flask_restful import Resource, reqparse\nfrom flask import make_response, render_template, redirect\nfrom models.micro_model import MicroModel\nfrom datetime import datetime\nimport resources.home_resource as h\nimport sqlite3\nfrom sqlite3 import OperationalError\n\n\nclass MicroResource(Resource):\n \"\"\"Communication for main micro 
requests\"\"\"\n parse = reqparse.RequestParser()\n parse.add_argument('employee',\n type=str,\n )\n parse.add_argument('model',\n type=str,\n )\n parse.add_argument('rp_serial',\n type=str,\n )\n parse.add_argument('ins_type',\n type=str,\n )\n parse.add_argument('rec_date',\n type=str,\n )\n parse.add_argument('start_date',\n type=str,\n )\n parse.add_argument('accessories',\n type=str,\n )\n parse.add_argument('appearance',\n type=str,\n )\n parse.add_argument('functions',\n type=str,\n )\n parse.add_argument('cleaning',\n type=str,\n )\n parse.add_argument('complete',\n type=str,\n )\n parse.add_argument('notes',\n type=str,\n )\n\n def get(self):\n try:\n connection = sqlite3.connect('data.db')\n except OperationalError:\n connection = sqlite3.connect('data.db')\n cursor = connection.cursor()\n query = cursor.execute('SELECT version FROM version')\n check = str(query.fetchone()[0])\n connection.close()\n if h.version == check:\n return make_response(render_template('micro_form.html'))\n else:\n return redirect('/shutdown')\n\n def post(self):\n time = datetime.today()\n data = MicroResource.parse.parse_args()\n entry = MicroModel(time, data['employee'], data['model'], data['rp_serial'], data['ins_type'], '', '', '',\n '', '', '', '', '')\n entry.save_to_db()\n return redirect('/microlog')\n\n\nclass MicroLog(Resource):\n \"\"\"Communication for micro log\"\"\"\n def get(self):\n try:\n connection = sqlite3.connect('data.db')\n except OperationalError:\n connection = sqlite3.connect('data.db')\n cursor = connection.cursor()\n query = cursor.execute('SELECT version FROM version')\n check = str(query.fetchone()[0])\n connection.close()\n if h.version == check:\n return make_response(render_template('micro_log.html', data=MicroModel.get_all_data()[::-1]))\n else:\n return redirect('/shutdown')\n\n\nclass MicroEdit(Resource):\n \"\"\"Communication for micro edit form\"\"\"\n def get(self, id, name):\n obj = MicroModel.get_single_data(id)\n try:\n connection = sqlite3.connect('data.db')\n except OperationalError:\n connection = sqlite3.connect('data.db')\n cursor = connection.cursor()\n query = cursor.execute('SELECT version FROM version')\n check = str(query.fetchone()[0])\n connection.close()\n if h.version == check:\n return make_response(render_template('Micro_edit.html', data=obj))\n else:\n return redirect('/shutdown')\n\n def post(self, id, name):\n parse = reqparse.RequestParser()\n parse.add_argument('notes',\n type=str)\n data = parse.parse_args()\n time = datetime.today()\n if name == 'rec_date':\n MicroModel.update_rec_date(id, time)\n elif name == 'start_date':\n MicroModel.update_start_date(id, str(time))\n elif name == 'accessories':\n MicroModel.update_accessories(id, 'stuff')\n elif name == 'appearance':\n MicroModel.update_appearance(id, 'good')\n elif name == 'functions':\n MicroModel.update_functions(id, 'funcs')\n elif name == 'cleaning':\n MicroModel.update_cleaning(id, 'complete')\n elif name == 'complete':\n MicroModel.update_complete(id, str(time))\n elif name == 'notes':\n MicroModel.update_notes(id, data['notes'])\n else:\n pass\n return redirect(f'/micro/{id}/null')\n","sub_path":"resources/micro_resource.py","file_name":"micro_resource.py","file_ext":"py","file_size_in_byte":4527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"457834978","text":"import logging\nimport pandas as pd\n\nfrom datetime import datetime\n\nFILE_PATH = 
'../data/data.csv'\n\nlogging.basicConfig(filename='../logs/dataProcessing.log',\n level=logging.INFO,\n\t\t\t\t\tformat='%(levelname)s:%(asctime)s:%(message)s',\n\t\t\t\t\tdatefmt=\"%Y-%m-%d %H:%M:%S\")\n\ndef write_report(text:str) -> None:\n\n f = open('../reports/dataProcessing.txt', 'a')\n f.write(datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\") + ' ' + text + '\\n')\n f.close()\n\nlogging.info('READING DATA')\ndata = pd.read_csv(FILE_PATH)\nprint(data.shape)\n\nlogging.info('FILTERING ROWS')\nnew_data = data[(data['category'] != \"'es_contents'\") & (data['category'] != \"'es_food'\") & (data['category'] != \"'es_transportation'\")]\nwrite_report('CATEGORIES : [es_contents, es_food, es_transportation] REMOVED FROM DATA')\nprint(new_data.shape)\n\nlogging.info('FILTERING COLUMNS')\nnew_data.drop(columns=['zipcodeOri', 'zipMerchant'], inplace=True)\nwrite_report('FEATURES : [zipcodeOri, zipMerchant] REMOVED FROM DATA')\nprint(new_data.shape)\n\nlogging.info('CREATING A NEW CSV FILE')\nnew_data.to_csv('../data/dataProcessed.csv', index=False)\nwrite_report('NEW CSV FILE CREATED')\n\nlogging.info('EXIT DATA PROCESSING')\n\n\n","sub_path":"src/dataProcessing.py","file_name":"dataProcessing.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"69749534","text":"import dataclasses\n\nimport pytest\nimport pytorch_lightning as lightning\nimport torch\n\nimport vak.models\n\n\nclass TestDAS:\n def test_model_is_decorated(self):\n assert issubclass(vak.models.DAS,\n vak.models.WindowedFrameClassificationModel)\n assert issubclass(vak.models.DAS,\n vak.models.base.Model)\n assert issubclass(vak.models.DAS,\n lightning.LightningModule)\n\n @pytest.mark.parametrize(\n 'net_config_dataclass',\n [\n vak.nets.das.net.DASNetBengaleseFinchConfig,\n vak.nets.das.net.DASNetFlyMultichannelConfig,\n vak.nets.das.net.DASNetFlySinglechannelConfig,\n vak.nets.das.net.DASNetMarmosetConfig,\n vak.nets.das.net.DASNetMouseConfig,\n ]\n )\n def test_init(self, net_config_dataclass):\n net_config_dict = dataclasses.asdict(net_config_dataclass)\n network = vak.nets.das.net.DASNet(**net_config_dict)\n mock_labelmap = {lbl: str(lbl) for lbl in range(net_config_dict['num_classes'])}\n\n model = vak.models.DAS(labelmap=mock_labelmap, network=network)\n\n assert isinstance(model, vak.models.DAS)\n for attr in ('network', 'loss', 'optimizer'):\n assert hasattr(model, attr)\n assert isinstance(getattr(model, attr),\n getattr(vak.models.das.DAS.definition, attr))\n assert hasattr(model, 'metrics')\n assert isinstance(model.metrics, dict)\n for metric_name, metric_callable in model.metrics.items():\n assert isinstance(metric_callable,\n vak.models.das.DAS.definition.metrics[metric_name])\n\n\n @pytest.mark.parametrize(\n 'net_config_dataclass',\n [\n vak.nets.das.net.DASNetBengaleseFinchConfig,\n vak.nets.das.net.DASNetFlyMultichannelConfig,\n vak.nets.das.net.DASNetFlySinglechannelConfig,\n vak.nets.das.net.DASNetMarmosetConfig,\n vak.nets.das.net.DASNetMouseConfig,\n ]\n )\n def test_forward(self, net_config_dataclass):\n net_config_dict = dataclasses.asdict(net_config_dataclass)\n network = vak.nets.das.net.DASNet(**net_config_dict)\n mock_labelmap = {lbl: str(lbl) for lbl in range(net_config_dict['num_classes'])}\n\n model = vak.models.DAS(labelmap=mock_labelmap, network=network)\n\n FAKE_BATCH_SIZE = 8\n fake_input_shape = (FAKE_BATCH_SIZE,\n net_config_dict['n_audio_channels'],\n net_config_dict['num_samples'])\n fake_input = 
torch.rand(*fake_input_shape)\n\n        out = model(fake_input)\n\n        out_shape = out.shape\n        assert out_shape[0] == FAKE_BATCH_SIZE\n        assert out_shape[1] == net_config_dict['num_classes']\n        assert out_shape[2] == net_config_dict['num_samples']\n","sub_path":"tests/test_models/test_das.py","file_name":"test_das.py","file_ext":"py","file_size_in_byte":2902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"454213552","text":"# 1. Kafka configuration\n# Kafka test topic\n# KAFKA_PROD_FOOTTOPIC = 'footcom'\n\n# Kafka production topic\nKAFKA_PROD_FOOTTOPIC = 'footInfoProd'\nKAFKA_PROD_BROKERS = ['54.222.152.174:9092','54.222.195.114:9092','52.80.73.74:9092']\nKAFKA_GROUP_ID = 'footInfoTest0809'\n\n# 2. Redis configuration\n# Redis production host\n# REDIS_HOST = 'web-service-prod.rawr9u.ng.0001.cnn1.cache.amazonaws.com.cn'\n# Redis test host\nREDIS_HOST = '54.222.235.154'\nREDIS_PORT = 6379\nREDIS_CONNECT_INFO = {'host':REDIS_HOST,'port':6379,'db':0}\n# Name of the Redis list that Kafka data is moved into (uuid)\nREDIS_KAFKA_LIST = 'kafka_redis_list_data'\n# Name of the Redis hash that Kafka data is moved into (uuid footdata)\nREDIS_KAFKA_HASHSET = 'kafka_redis_hash_data'\n# Name of the Redis list where abnormal foot data is stored\nREDIS_LIST_FOOTDATA_EXCEPT = 'redis_list_footdata_except'\n# Name of the Redis list that finished model results (shop_no uuid sex) go into\nREDIS_LIST_COMPUTE_RESULT = 'redis_list_compute_result'\n\n# 3. Callback API configuration\n# Test API address\nRETURN_PORT_URL = 'http://epoque.epoque.cn/bdp/Bdsendmsgman'\n# Production API address\n# RETURN_PORT_URL = 'http://epoque.epoque.cn/bdp/Bdsendmsg'\n\n# 4. Logging configuration\n# Test log paths\nLOG_FILE_PATH_KAFKA_REDIS ='D:\\\\recommend\\prodrec\\log\\kafka_redis_'\nLOG_FILE_PATH_RETURN_ABNORMAL = 'D:\\\\recommend\\prodrec\\log\\\\return_abnormal_'\nLOG_FILE_PATH_RETURN_NORMAL = 'D:\\\\recommend\\prodrec\\log\\\\return_normal_'\nLOG_FILE_PATH_UTIL_REDIS = 'D:\\\\recommend\\prodrec\\log\\\\util_redis_'\nLOG_FILE_PATH_FUNC = 'D:\\\\recommend\\prodrec\\log\\\\func_'\n","sub_path":"prodrec/data_convert_cache/configuration_test.py","file_name":"configuration_test.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"515069690","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 19 18:19:42 2020\n\n@author: Rafael\n\"\"\"\n\ndef pega_coordenadas(file_name):\n    with open(file_name) as f:\n        f_data = f.readlines()\n        f.close()\n    \n    linhas = []\n\n    for i in range(len(f_data)):\n        linhas.append(f_data[i].split(','))\n    \n    Coordenadas = {}\n    Coordenadas['X'] = []\n    Coordenadas['Y'] = []\n    Coordenadas['Z'] = []\n\n    for i in range(len(linhas)):\n        x = float(linhas[i][3])\n        y = float(linhas[i][4])\n        z = float(linhas[i][5])\n\n        Coordenadas['X'].append(x)\n        Coordenadas['Y'].append(y)\n        Coordenadas['Z'].append(z) \n    \n    return Coordenadas\n\ndef plot_3dcoord(coord_dict):\n    import matplotlib.pyplot as plt\n    from mpl_toolkits.mplot3d import Axes3D\n    fig = plt.figure()\n    ax = fig.add_subplot(111, projection='3d')\n    \n    x = coord_dict['X']\n    y = coord_dict['Y']\n    z = coord_dict['Z']\n    \n    ax.scatter(x,y,z)\n\n\nnome_arquivo = 'PONTOS_ESFERA.txt'\n\ndicionario_esfera = pega_coordenadas(nome_arquivo)\nplot_3dcoord(dicionario_esfera)\n\n\n\n\n\n\n\n\n\n \n \n \n \n \n ","sub_path":"Notebooks/arquivos_auxiliares/plotando_coordenadas.py","file_name":"plotando_coordenadas.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"385562757","text":"# Importing modules/functions needed\n\nfrom random import randint\nfrom os import remove, rename\n\n# Defining userScore function\n\ndef 
getUserScore(userName):\n try: \n input = open('userScores.txt', mode = 'r')\n for line in input:\n content = line.split(', ')\n if content[0] == userName:\n input.close()\n return content[1]\n input.close()\n return \"-1\"\n except IOError:\n print(\"File userScores.txt was not found. \\n A new file will be created\") \n input = open('userScores.txt', mode = 'w') \n input.close()\n return \"-1\"\n\n# Defining updateUserScore function\n\ndef updateUserScore(newUser, userName, score):\n if newUser:\n input = open('userScores.txt', 'a')\n input.write(userName + ', ' + score)\n input.close()\n \n else:\n input = open('userScores.txt', 'r')\n temp = open('userScores.tmp', 'w')\n for line in input:\n content = line.split(',')\n if content[0] == userName:\n content[1] = score\n line = content[0] + ', ' + content[1] + '\\n'\n temp.write(line) \n \n input.close()\n temp.close()\n \n remove('userScores.txt')\n rename('userScores.tmp', 'userScores.txt')\n\n# Generating mathematical questions\n\ndef generateQuestion():\n operandList = [0,0,0,0,0]\n operatorList = ['','','','','']\n operatorDict = {1:'+', 2:'-', 3:'*', 4:'**'}\n for index in range(0, 5):\n operandList[index] = randint(1, 9)\n for index in range(0, 4):\n if index > 0 and operatorList[index - 1] != '**':\n operator = operatorDict[randint(1, 4)]\n else:\n operator = operatorDict[randint(1, 3)]\n operatorList[index] = operator\n questionString = str(operandList[0])\n for index in range(1,5):\n questionString = questionString + operatorList[index - 1] + str(operandList[index])\n result = eval(questionString)\n questionString = questionString.replace('**','^')\n print ('\\n' + questionString)\n userResult = input('Insert answer here:')\n while True:\n if int(userResult) == result:\n print ('That is correct, well done !')\n return 1\n else:\n print ('Sorry, the right answer is', result)\n return 0\n\n ","sub_path":"myPythonFunctions.py","file_name":"myPythonFunctions.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"296884583","text":"import time\nimport numpy as np \nimport codecs\nimport argparse\nfrom dataset.loader import load_data\nfrom model.Model import Transformer\nfrom trainer import Trainer\nfrom evaluator import Evaluator\nimport model.Constants as Constants\nfrom logger import initialize_exp\nfrom model.LM import LanguageModel\nimport torch\nimport os\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(description='Text Simplification')\n\n parser.add_argument(\"--simp_train_path\", type=str, default=\"\")\n parser.add_argument(\"--autoencoder_path\", type=str, default=\"\")\n parser.add_argument(\"--simp_dev_path\", type=str, default=\"\")\n parser.add_argument(\"--comp_train_path\", type=str, default=\"\")\n parser.add_argument(\"--comp_dev_path\", type=str, default=\"\")\n\n parser.add_argument(\"--para_dev_path\", type=str, default=\"\")\n parser.add_argument(\"--frc_path\", type=str, default=\"\")\n parser.add_argument(\"--para_test_path\", type=str, default=\"\")\n parser.add_argument(\"--para_train_path\", type=str, default=\"\")\n parser.add_argument(\"--supervised_rate\", type=int, default=0)\n parser.add_argument(\"--vocab_path\", type=str, default=\"\")\n parser.add_argument(\"--us_pretrain_embedding\", type=int, default=1)\n parser.add_argument(\"--embedding_path\", type=str, default=\"\")\n parser.add_argument(\"--comp_frequent_list\", type=str, default=\"\")\n parser.add_argument(\"--simp_frequent_list\", 
type=str, default=\"\")\n parser.add_argument(\"--comp_ppdb_rules\", type=str, default=\"\")\n parser.add_argument(\"--simp_ppdb_rules\", type=str, default=\"\")\n parser.add_argument(\"--dump_path\", type=str, default=\"\")\n parser.add_argument(\"--checkpoint_path\", type=str, default=\"\")\n parser.add_argument(\"--name\", type=str, default=\"\")\n parser.add_argument(\"--stoplist_path\", type=str, default=\"\")\n parser.add_argument(\"--use_pretrained_model\", type=int, default=0)\n parser.add_argument(\"--otf_autoencoding\", type=int, default=0)\n parser.add_argument(\"--otf_back_translation\", type=int, default=0)\n\n\n # transformer parameters\n parser.add_argument(\"--emb_dim\", type=int, default=512,\n help=\"Embedding layer size\")\n parser.add_argument(\"--n_enc_layers\", type=int, default=4,\n help=\"Number of layers in the encoders\")\n parser.add_argument(\"--n_dec_layers\", type=int, default=4,\n help=\"Number of layers in the decoders\")\n\n parser.add_argument(\"--dropout\", type=float, default=0,\n help=\"Dropout\")\n parser.add_argument(\"--d_inner\", type=int, default=2048,\n help=\"Transformer fully-connected hidden dim size\")\n parser.add_argument(\"--n_head\", type=int, default=8,\n help=\"encoder_attention_heads\")\n parser.add_argument(\"--d_model\", type=int, default=512,\n help=\"hidden size of transformer, must equal with embedding dim\")\n parser.add_argument(\"--d_k\", type=int, default=8,\n help=\"size of keys\")\n parser.add_argument(\"--d_v\", type=int, default=8,\n help=\"size of value\")\n parser.add_argument(\"--len_max_seq\", type=int, default=100,\n help=\"size of value\")\n\n parser.add_argument(\"--share_encdec_emb\", type=int, default=0,\n help=\"Share encoder embeddings / decoder embeddings\")\n parser.add_argument(\"--share_decpro_emb\", type=int, default=0,\n help=\"Share decoder embeddings / decoder output projection\")\n parser.add_argument(\"--share_output_emb\", type=int, default=0,\n help=\"Share decoder output embeddings\")\n\n parser.add_argument(\"--share_enc\", type=int, default=0,\n help=\"Number of layers to share in the encoders\")\n parser.add_argument(\"--share_dec\", type=int, default=0,\n help=\"Number of layers to share in the decoders\")\n\n # encoder input perturbation\n parser.add_argument(\"--word_shuffle\", type=float, default=0,\n help=\"Randomly shuffle input words (0 to disable)\")\n parser.add_argument(\"--shuffle_mode\", type=str, default=\"\")\n parser.add_argument(\"--drop_type\", type=str, default=\"\")\n parser.add_argument(\"--word_replace\", type=float, default=0,\n help=\"Randomly replace input words (0 to disable)\")\n parser.add_argument(\"--word_dropout\", type=float, default=0,\n help=\"Randomly dropout input words (0 to disable)\")\n parser.add_argument(\"--word_blank\", type=float, default=0,\n help=\"Randomly blank input words (0 to disable)\")\n parser.add_argument(\"--syn_denosing\", type=float, default=0,\n help=\"Use syntactic denosing\")\n\n # training steps\n parser.add_argument(\"--otf_sample\", type=float, default=-1,\n help=\"Temperature for sampling back-translations (-1 for greedy decoding)\")\n parser.add_argument(\"--otf_backprop_temperature\", type=float, default=-1,\n help=\"Back-propagate through the encoder (-1 to disable, temperature otherwise)\")\n parser.add_argument(\"--otf_sync_params_every\", type=int, default=1000, metavar=\"N\",\n help=\"Number of updates between synchronizing params\")\n parser.add_argument(\"--otf_num_processes\", type=int, default=30, metavar=\"N\",\n 
help=\"Number of processes to use for OTF generation\")\n parser.add_argument(\"--otf_update_enc\", type=int, default=True,\n help=\"Update the encoder during back-translation training\")\n parser.add_argument(\"--otf_update_dec\", type=int, default=True,\n help=\"Update the decoder during back-translation training\")\n parser.add_argument(\"--stopping_criterion\", type=str, default=None)\n\n # training parameters\n parser.add_argument(\"--batch_size\", type=int, default=32,\n help=\"Batch size\")\n parser.add_argument(\"--use_multi_process\", type=int, default=0,\n help=\"use_multi_process\")\n\n parser.add_argument(\"--lambda_xe_mono\", type=int, default=1,\n help=\"Cross-entropy reconstruction coefficient (autoencoding)\")\n parser.add_argument(\"--lambda_xe_para\", type=str, default=\"0\",\n help=\"Cross-entropy reconstruction coefficient (parallel data)\")\n parser.add_argument(\"--lambda_xe_back\", type=str, default=\"0\",\n help=\"Cross-entropy reconstruction coefficient (back-parallel data)\")\n parser.add_argument(\"--lambda_xe_otfd\", type=str, default=\"0\",\n help=\"Cross-entropy reconstruction coefficient (on-the-fly back-translation parallel data)\")\n parser.add_argument(\"--lambda_xe_otfa\", type=str, default=\"0\",\n help=\"Cross-entropy reconstruction coefficient (on-the-fly back-translation autoencoding data)\")\n\n parser.add_argument(\"--epoch_size\", type=int, default=100000,\n help=\"Epoch size / evaluation frequency\")\n parser.add_argument(\"--max_epoch\", type=int, default=100000,\n help=\"Maximum epoch size\")\n parser.add_argument(\"--pretrain_autoencoder\", type=int, default=0)\n parser.add_argument(\"--rl_finetune\", type=int, default=0)\n parser.add_argument(\"--lr\", type=float, default=0.001)\n parser.add_argument(\"--gamma\", type=float, default=0.5)\n parser.add_argument(\"--delta\", type=float, default=0.5)\n parser.add_argument(\"--use_lm\", type=int, default=0)\n parser.add_argument(\"--lm_path\", type=str, default=\"\")\n parser.add_argument(\"--additive\", type=int, default=1)\n parser.add_argument('--simp_drop', type=int, default=0)\n parser.add_argument('--use_check', type=int, default=0)\n\n # freeze network parameters\n parser.add_argument(\"--freeze_enc_emb\", type=int, default=0,\n help=\"Freeze encoder embeddings\")\n parser.add_argument(\"--freeze_dec_emb\", type=int, default=0,\n help=\"Freeze decoder embeddings\")\n # evaluation\n parser.add_argument(\"--eval_only\", type=int, default=0,\n help=\"Only run evaluations\")\n parser.add_argument(\"--beam_size\", type=int, default=0,\n help=\"Beam width (<= 0 means greedy)\")\n return parser\n\n\ndef main(params):\n\n def anneal_function(step, k, x0):\n return float(params.gamma / (1+np.exp(-k*(step-x0))))\n\n logger = initialize_exp(params)\n data = load_data(params)\n params.n_src_vocab = len(data['index2word'])\n params.n_tgt_vocab = len(data['index2word'])\n model = Transformer(params).to(Constants.device)\n\n if params.use_pretrained_model:\n logger.info(\"loading pretrained model\")\n path = os.path.join(params.dump_path, '%s.pth' % params.name)\n model_data = torch.load(path)\n model = model_data['model'].to(Constants.device)\n\n elif params.pretrain_autoencoder == 0:\n logger.info(\"loading pretrained autoencoders\")\n path = os.path.join(params.dump_path, 'autoencoder.pth')\n model_data = torch.load(path)\n model = model_data['model'].to(Constants.device)\n\n if params.use_lm:\n logger.info(\"loading pretrained language model\")\n path = params.lm_path\n lm = 
torch.load(path).to(Constants.device)\n    else:\n        lm = None\n\n    trainer = Trainer(model, lm, data, params, logger)\n    if params.use_check:\n        trainer.reload_checkpoint()\n    evaluator = Evaluator(trainer.model, lm, data, params)\n\n\n    logger.info("==================== Eval at Random parameters =====================")\n    #scores = evaluator.eval_all(use_pointer=False)\n\n    logger.info(" ====================== Pretraining Embedding... ====================")\n\n    if params.pretrain_autoencoder > 0:\n        for i in range(params.pretrain_autoencoder):\n            trainer.enc_dec_step('simp', 'simp')\n            trainer.enc_dec_step('comp', 'comp')\n            if i % 5000 == 0:\n                simp_loss, comp_loss = trainer.print_stats(pretrain=True)\n                # score = evaluator.eval_all(use_pointer=False)\n        \n        logger.info("saving model")\n        trainer.save_model(params.name)\n        return\n\n    logger.info("==================== Eval at AutoEncoder Only ====================")\n    scores = evaluator.eval_all(use_pointer=False)\n\n    trainer.start_back_translation()\n    for ep in range(params.max_epoch):\n        logger.info(" ======================== Start Epoch %i ======================" % ep)\n        trainer.n_sentences = 0\n\n        while trainer.n_sentences < params.epoch_size:\n            trainer.start_time = time.time()\n\n            if params.otf_autoencoding:\n                mono_xe = 1\n                if mono_xe > 0:\n                    trainer.enc_dec_step('simp', 'simp', xe=mono_xe)\n                    trainer.enc_dec_step('comp', 'comp', xe=mono_xe)\n\n            if params.supervised_rate > 0:\n                trainer.enc_dec_step('comp', 'simp', xe=1, back=False)\n\n            if params.otf_back_translation:\n                if trainer.n_iter % params.otf_sync_params_every == 0:\n                    logger.info("Synchronize the model parameters")\n                    trainer.otf_sync_params()\n\n                if not getattr(params, 'started_otf_batch_gen', False):\n                    otf_iterator = trainer.otf_bt_gen_async()\n                    params.started_otf_batch_gen = True\n\n                if trainer.n_iter % params.otf_sync_params_every == 0:\n                    trainer.otf_sync_params()\n\n                if params.supervised_rate == 0:\n                    otf_gamma = anneal_function(trainer.n_iter, k=0.00075, x0=10000)\n                else:\n                    otf_gamma = params.gamma\n\n                before_gen = time.time()\n                batches = next(otf_iterator)\n                trainer.gen_time += time.time() - before_gen\n\n                for batch in batches:\n                    trainer.otf_bt(batch, lambda_xe=params.lambda_xe_otfd, gamma=otf_gamma)\n\n            trainer.iter()\n\n        logger.info("*********** Evaluating ***********")\n        scores = evaluator.eval_all(use_pointer=False)\n        sari = float(scores['sari'])\n        # trainer.model_scheduler.step(sari)\n        is_end = trainer.end_epoch(scores)\n\n        if is_end:\n            break\n\n\nif __name__ == '__main__':\n    parser = get_parser()\n    params = parser.parse_args()\n    main(params)\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"276377721","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n\r\nimport requests\r\nfrom lxml import etree\r\nfrom pymongo import MongoClient\r\nimport time\r\nfrom fake_useragent import UserAgent\r\nua = UserAgent()\r\n\r\n# Lagou blocks repeated access; problems start by page six\r\n# Try again tomorrow at a slower pace ----> works: 5 requests per minute is fine\r\n# Not sure whether sending a Referer would get around this limit\r\n# Could it be a Referer issue? That header carries the previous URL\r\n\r\n\r\ndef mylog(str):\r\n    print(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())) + ': ' + str)\r\n\r\ndef getHTMLText(url):\r\n    try:\r\n        header = {\r\n            'Host': 'www.lagou.com',\r\n            'User-Agent': ua.random,\r\n            # 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36',\r\n        }\r\n        mylog(header['User-Agent'])\r\n        r = requests.get(url, headers=header, timeout=20)\r\n        r.raise_for_status()\r\n        r.encoding = r.apparent_encoding\r\n        return r.text\r\n    except Exception:\r\n        return url+' raised an exception'\r\n\r\ndef scanPages(url, page):\r\n    client = MongoClient()\r\n    db = client.newLagou # connect to the test database; created automatically if missing\r\n    mg_job_table = db.job # use the set collection; created automatically if missing\r\n\r\n    for j in range(page):\r\n        mylog('Crawling page {0}'.format(j+1))\r\n        newUrl = url + '{0}/'.format(j+1)\r\n        mylog(newUrl)\r\n        html = getHTMLText(newUrl)\r\n\r\n        s = etree.HTML(html)\r\n        dpnames = s.xpath('//*[@id=\"s_position_list\"]/ul/li/@data-positionname')\r\n        comps = s.xpath('//*[@id=\"s_position_list\"]/ul/li/@data-company')\r\n        salarys = s.xpath('//*[@id=\"s_position_list\"]/ul/li/@data-salary')\r\n        positionids = s.xpath('//*[@id=\"s_position_list\"]/ul/li/@data-positionid')\r\n        compids = s.xpath('//*[@id=\"s_position_list\"]/ul/li/@data-companyid')\r\n        brs = s.xpath('//*[@id=\"s_position_list\"]/ul/li/div[2]/div[2]/text()') # summary\r\n        adds = s.xpath('//*[@id=\"s_position_list\"]/ul/li/div[1]/div[1]/div[1]/a/span/em/text()') # address\r\n\r\n\r\n        length = len(positionids)\r\n        mylog('Current page has {0} items'.format(length))\r\n        if length == 0:\r\n            print(html)\r\n            continue\r\n        i = 0\r\n        while i < length:\r\n            dict = {}\r\n            dict['positionid'] = positionids[i]\r\n            dict['name'] = dpnames[i]\r\n            dict['company'] = comps[i]\r\n            dict['salary'] = salarys[i]\r\n            dict['companyUrl'] = 'https://www.lagou.com/gongsi/{0}.html'.format(compids[i])\r\n            dict['positionUrl'] = 'https://www.lagou.com/jobs/{0}.html'.format(positionids[i])\r\n            dict['profile'] = brs[i]\r\n            dict['address'] = adds[i]\r\n            mg_job_table.insert(dict)\r\n            i += 1\r\n        mylog('Start waiting')\r\n        time.sleep(12)\r\n        mylog('Done waiting')\r\n\r\nif __name__ == '__main__':\r\n    url = 'https://www.lagou.com/zhaopin/C++/'\r\n    scanPages(url, 10)\r\n","sub_path":"lagou.py","file_name":"lagou.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"63486523","text":"import collections\nimport random\nimport functools\nimport operator\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom action_types import ACTIONS\nfrom consts import TAGS\n\n\n# check_influence_distribution()\n# check_total_tags_distribution()\n# check_random_user_tags_distribution(2)\n# check_distribution_by_tag()\n# check__total_user_actions_probabilities()\n# plot_data_lengths(data=[len(a.posts) for a in model.schedule.agents],\n#                   title=\"Number of posts\")\n# plot_dictionary(dicts=[a.performed_actions for a in model.schedule.agents],\n#                 title=\"Performed actions\")\n# plot_data_lengths(data=[len(a.friends) for a in model.schedule.agents],\n#                   title=\"Total friends\", not_too_long_flag=False,\n#                   xlabel=\"Number of friends for each user\",\n#                   ylabel=\"Number of users with y friends\")\n\n\ndef check_influence_distribution(model):\n    agent_data = [a.get_influence() for a in model.schedule.agents]\n    plt.hist(agent_data)\n    plt.title(\"User influence\")\n    plt.show()\n\n\ndef check_total_tags_distribution(model):\n    agents_dicts = [a.get_interests() for a in model.schedule.agents]\n    agents_dicts_sum = {}\n    for tag in TAGS:\n        agents_dicts_sum[tag] = sum(abs(d[tag]) for d in agents_dicts)\n    plt.title(\"Sum of tag interests (abs)\")\n    plt.bar(agents_dicts_sum.keys(), agents_dicts_sum.values())\n    plt.show()\n\n\ndef check_distribution_by_tag(model):\n    agents_dicts = [a.get_interests() for a in model.schedule.agents]\n    dict_all = collections.defaultdict(list)\n    for tag in TAGS:\n        dict_all[tag] = []\n        for d in agents_dicts:\n            
dict_all[tag].append(d[tag])\n plt.title(tag)\n plt.hist(dict_all[tag])\n plt.show()\n\n\ndef check_random_user_tags_distribution(model, number_of_users_to_display):\n for _ in range(number_of_users_to_display):\n agent_dict = random.choice(model.schedule.agents).get_interests()\n plt.bar(agent_dict.keys(), agent_dict.values())\n plt.title(\"Random user interests\")\n plt.show()\n\n\ndef check__total_user_actions_probabilities(model):\n agents_dicts = [a.get_actions_probabilities()\n for a in model.schedule.agents]\n dict_all = collections.defaultdict(list)\n for action in ACTIONS:\n dict_all[action] = []\n for d in agents_dicts:\n dict_all[action].append(d[action])\n plt.title(action)\n plt.hist(dict_all[action])\n plt.show()\n\n\ndef check_number_of_friends_distribution(model):\n agent_data = [a.get_number_of_friends() for a in model.schedule.agents]\n plt.hist(agent_data)\n plt.xlabel(\"Range of friends\")\n plt.ylabel(\"Users with x number of friends\")\n plt.title(\"Number of friends\")\n plt.show()\n\n\n# check_number_of_friends_distribution()\ndef plot_data_lengths(data, title, not_too_long_flag=True, xlabel=\"\", ylabel=\"\"):\n bins = np.arange(0, max(data) + 1.5) - 0.5\n fig, ax = plt.subplots()\n plt.title(title)\n _ = ax.hist(data, bins)\n if not_too_long_flag:\n ax.set_xticks(bins + 0.5)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.show()\n\n\ndef plot_dictionary(dicts, title):\n plt.title(title)\n result = dict(functools.reduce(operator.add,\n map(collections.Counter, dicts)))\n lists = sorted(result.items())\n x, y = zip(*lists)\n plt.bar(x, y)\n plt.show()\n","sub_path":"check_utils.py","file_name":"check_utils.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"232487986","text":"\n\nfrom xai.brain.wordbase.verbs._bully import _BULLY\n\n#calss header\nclass _BULLYING(_BULLY, ):\n\tdef __init__(self,): \n\t\t_BULLY.__init__(self)\n\t\tself.name = \"BULLYING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"bully\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_bullying.py","file_name":"_bullying.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"49182423","text":"\"\"\"empty message\n\nRevision ID: 504b13bcf689\nRevises: e66795e88cf3\nCreate Date: 2022-04-05 09:41:28.088362\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '504b13bcf689'\ndown_revision = 'e66795e88cf3'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('event', schema=None) as batch_op:\n batch_op.add_column(sa.Column('template_id', sa.Integer(), nullable=True))\n batch_op.create_foreign_key(None, 'event_template', ['template_id'], ['id'])\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n with op.batch_alter_table('event', schema=None) as batch_op:\n batch_op.drop_constraint(None, type_='foreignkey')\n batch_op.drop_column('template_id')\n\n # ### end Alembic commands ###\n","sub_path":"src/migrations/versions/504b13bcf689_add_template_event_link.py","file_name":"504b13bcf689_add_template_event_link.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"810520","text":"\"\"\"https://www.acmicpc.net/problem/11660\"\"\"\n\n\n\"\"\"구간 합 구하기 5\"\"\"\n\n\n# git add week_08/Re_boj_S1_11660_Heegun.py\n# git commit -m \"[김희건] boj 구간 합 구하기 5 [누적합 라이브러리, sys.stdin 미사용시 시간초과\"\n\nfrom itertools import accumulate\nimport sys\n\ninput = sys.stdin.readline\n\nn,m = map(int,input().split())\n\nboard = []\n\n\nfor _ in range(n):\n board.append(list(map(int,input().split())))\n\n\nfor i in range(n):\n board[i] = list(accumulate(board[i]))\nfor y in range(1,n):\n for x in range(n):\n board[y][x] += board[y-1][x]\n\n\nresult = []\n\nfor _ in range(m):\n\n x1, y1, x2, y2 = map(int,input().split())\n\n if x1 == 1 and y1 == 1:\n print(board[x2-1][y2-1])\n elif x1 == 1:\n tmp = board[x2-1][y2-1] - board[x2-1][y1-2]\n print(tmp)\n elif y1 == 1:\n tmp = board[x2-1][y2-1] - board[x1-2][y2-1]\n print(tmp)\n else:\n tmp = board[x2-1][y2-1] - board[x2-1][y1-2] - board[x1-2][y2-1]\n tmp += board[x1-2][y1-2]\n print(tmp)\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n \n\n\n\n\n\n\n\n\n\n","sub_path":"week_08/Re_boj_S1_11660_Heegun.py","file_name":"Re_boj_S1_11660_Heegun.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"305008253","text":"# 5188 / calculate min sum of numbers in route\ndef find(i, j, ln):\n global D, N, mat, minV\n\n if ln > minV:\n return\n\n for di, dj in D:\n ni, nj = i + di, j + dj\n if ni == N - 1 and nj == N - 1:\n if minV >= ln + mat[ni][nj]:\n minV = ln + mat[ni][nj]\n elif ni < N and nj < N:\n find(ni, nj, ln + mat[ni][nj])\n\n\nD = [(0, 1), (1, 0)]\n\nfor T in range(int(input())):\n N = int(input())\n mat = [list(map(int, input().split())) for _ in range(N)]\n\n minV = 10 * (2 * N - 1)\n find(0, 0, mat[0][0])\n\n print(f'#{T + 1} {minV}')\n","sub_path":"Course/Advanced/5188.py","file_name":"5188.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"308239707","text":"import unittest\r\nfrom decimal import Decimal\r\nfrom models.address import AddressModel\r\nfrom models.user import RoleModel, UserModel\r\nfrom tests.base_test import BaseTest\r\n\r\n\r\nclass TestAddress(BaseTest):\r\n \"\"\"Test all methods for the address models\"\"\"\r\n\r\n # noinspection PyArgumentList\r\n def setUp(self):\r\n super(TestAddress, self).setUp()\r\n with self.app_context:\r\n test_user = {\r\n \"email\": \"test_user@test.com\",\r\n \"password\": \"pass\",\r\n \"first_name\": \"test\",\r\n \"last_name\": \"user\",\r\n \"mobile_phone\": \"61234567\",\r\n \"is_active\": True,\r\n \"role_id\": 1,\r\n }\r\n test_role = {\"name\": \"developer\"}\r\n test_address = {\r\n \"address_type\": \"Home\",\r\n \"city\": \"Test City\",\r\n \"district\": \"Test District\",\r\n \"neighborhood\": \"Test Neighborhood\",\r\n \"street\": \"Test Street\",\r\n \"building_name\": \"Test Building\",\r\n \"unit_number\": \"Test Unit\",\r\n \"latitude\": 179.12345,\r\n \"longitude\": 89.12345,\r\n \"is_active\": True,\r\n \"user_id\": 1,\r\n 
}\r\n            self.user = UserModel(**test_user)\r\n            self.role = RoleModel(**test_role)\r\n            self.address = AddressModel(**test_address)\r\n\r\n    def test_init(self):\r\n        with self.app_context:\r\n            self.assertEqual(self.address.address_type, \"Home\")\r\n            self.assertEqual(self.address.city, \"Test City\")\r\n            self.assertEqual(self.address.district, \"Test District\")\r\n            self.assertEqual(self.address.neighborhood, \"Test Neighborhood\")\r\n            self.assertEqual(self.address.street, \"Test Street\")\r\n            self.assertEqual(self.address.building_name, \"Test Building\")\r\n            self.assertEqual(self.address.unit_number, \"Test Unit\")\r\n            self.assertEqual(self.address.latitude, 179.12345)\r\n            self.assertEqual(self.address.longitude, 89.12345)\r\n            self.assertTrue(self.address.is_active)\r\n            self.assertEqual(self.address.user_id, 1)\r\n\r\n    def test_find_by_user_id(self):\r\n        with self.app_context:\r\n            self.role.save_to_db()\r\n            user_id = self.user.save_to_db()[\"id\"]\r\n            self.address.save_to_db()\r\n            self.assertEqual(AddressModel.find_by_user_id(user_id).id, 1)\r\n\r\n    def test_json(self):\r\n        with self.app_context:\r\n            self.role.save_to_db()\r\n            user_id = self.user.save_to_db()[\"id\"]\r\n            self.address.save_to_db()\r\n            self.assertDictEqual(\r\n                AddressModel.find_by_user_id(user_id).json(),\r\n                {\r\n                    \"id\": 1,\r\n                    \"address_type\": \"Home\",\r\n                    \"city\": \"Test City\",\r\n                    \"district\": \"Test District\",\r\n                    \"neighborhood\": \"Test Neighborhood\",\r\n                    \"street\": \"Test Street\",\r\n                    \"building_name\": \"Test Building\",\r\n                    \"unit_number\": \"Test Unit\",\r\n                    \"latitude\": Decimal(\"179.12345\"),\r\n                    \"longitude\": Decimal(\"89.12345\"),\r\n                    \"is_active\": True,\r\n                    \"user_id\": 1,\r\n                },\r\n            )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    unittest.main()\r\n","sub_path":"tests/models/test_address.py","file_name":"test_address.py","file_ext":"py","file_size_in_byte":3420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"291365195","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 21 08:09:24 2021\n\n@author: illusory\n\"\"\"\n\nimport numpy as np\nimport sklearn as sk\nimport sklearn.model_selection  # explicit import: submodules are not loaded by `import sklearn` alone\nfrom sklearn import svm\nimport matplotlib.pyplot as plt\n\nclass SVM():\n    def __init__(self):\n        self.dataset = {\"data\":[],\"label\":[]}\n\n    def linerable(self,datasize=500):\n        self.dataset[\"label\"] = np.zeros((2 * datasize, 1))\n\n        x1 = np.reshape(np.random.normal(1, 0.3, datasize), (datasize, 1))\n        y1 = np.reshape(np.random.normal(1, 0.8, datasize), (datasize, 1))\n        self.dataset[\"data\"] = np.concatenate((x1, y1), axis=1)\n        self.dataset[\"label\"][0:datasize, :] = 0\n\n        x2 = np.reshape(np.random.normal(3, 0.4, datasize), (datasize, 1))\n        y2 = np.reshape(np.random.normal(3, 0.5, datasize), (datasize, 1))\n        self.dataset[\"data\"] = np.concatenate((self.dataset[\"data\"], np.concatenate((x2, y2), axis=1)), axis=0)\n        self.dataset[\"label\"][datasize:2 * datasize, :] = 1\n\n    def unlinerable(self,datasize=500):\n        self.dataset[\"label\"] = np.zeros((2 * datasize, 1))\n\n        x1 = np.reshape(np.random.normal(1, 0.9, datasize), (datasize, 1))\n        y1 = np.reshape(np.random.normal(1, 0.5, datasize), (datasize, 1))\n        self.dataset[\"data\"] = np.concatenate((x1, y1), axis=1)\n        self.dataset[\"label\"][0:datasize, :] = 0\n\n        x2 = np.reshape(np.random.normal(2, 0.8, datasize), (datasize, 1))\n        y2 = np.reshape(np.random.normal(2, 0.8, datasize), (datasize, 1))\n        self.dataset[\"data\"] = np.concatenate((self.dataset[\"data\"], np.concatenate((x2, y2), axis=1)), axis=0)\n
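        # second synthetic cluster: its Gaussians (mean 2, std 0.8) overlap the first cluster, so the two classes are not linearly separable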
        self.dataset[\"label\"][datasize:2 * datasize, :] = 1\n\n    \na = SVM()\na.unlinerable()\ntrain_data,test_data,train_label,test_label =sk.model_selection.train_test_split(a.dataset[\"data\"],a.dataset[\"label\"], random_state=1, train_size=0.5,test_size=0.5)\nclassifier=svm.SVC(C=1,kernel='linear',gamma=100,decision_function_shape='ovr') # ovr: one-vs-rest strategy\nclassifier.fit(train_data,train_label.ravel()) # ravel flattens the labels in row-major order by default\n# accuracy can also be computed directly with the accuracy_score method\nfrom sklearn.metrics import accuracy_score\ntra_label=classifier.predict(train_data) # predicted labels for the training set\ntes_label=classifier.predict(test_data) # predicted labels for the test set\nx1,x2,x3,x4,y1,y2,y3,y4 = [],[],[],[],[],[],[],[]\nxtrain,ytrain,xtest,ytest = [],[],[],[]\nfor i in range(len(train_data)):\n    if train_label[i] == 0 and tra_label[i] == 0:\n        x1.append(train_data[i][0])\n        y1.append(train_data[i][1])\n    elif train_label[i] == 1 and tra_label[i] == 1:\n        x2.append(train_data[i][0])\n        y2.append(train_data[i][1])\n    else:\n        xtrain.append(train_data[i][0])\n        ytrain.append(train_data[i][1])\nfor i in range(len(test_data)):\n    if test_label[i] == 0 and tes_label[i] == 0:\n        x3.append(test_data[i][0])\n        y3.append(test_data[i][1])\n    elif test_label[i] == 1 and tes_label[i] == 1:\n        x4.append(test_data[i][0])\n        y4.append(test_data[i][1])\n    else:\n        xtest.append(test_data[i][0])\n        ytest.append(test_data[i][1])\nplt.figure(1) \nplt.plot(x1,y1,'.',color = \"g\")\nplt.plot(x2,y2,\".\",color = \"r\")\nplt.plot(xtrain,ytrain,\".\",color = \"b\")\nplt.figure(2)\nplt.plot(x3,y3,'.',color = \"g\")\nplt.plot(x4,y4,\".\",color = \"r\")\nplt.plot(xtest,ytest,\".\",color = \"b\")\nplt.show()\nprint(\"Training set:\", accuracy_score(train_label,tra_label) )\nprint(\"Test set:\", accuracy_score(test_label,tes_label) )\n","sub_path":"SVM/SVM.py","file_name":"SVM.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"377568480","text":"from django.shortcuts import render\nfrom django.core.paginator import Paginator\nfrom listings.choices import choice_bedrooms, choice_price, choices_state\nfrom .models import Listing\n\n\ndef index(request):\n    listing = Listing.objects.order_by('-list_date').filter(is_published=True)\n    paginator = Paginator(listing, 6)\n    page_number = request.GET.get('page')\n    page_listing = paginator.get_page(page_number)\n    context = {\n        'listings': page_listing\n    }\n    return render(request, 'listings/listings.html', context)\n\n\ndef listing(request, listing_id):\n    listing = Listing.objects.get(id=listing_id)\n    context = {\n        'listing': listing\n    }\n    # print(\"Data\", context)\n    return render(request, 'listings/listing.html', context)\n\n\ndef search(request):\n    queryset_list = Listing.objects.order_by('-list_date')\n\n    # For keyword Searching\n    if 'keywords' in request.POST:\n        keywords = request.POST['keywords']\n\n        if keywords:\n            queryset_list = queryset_list.filter(\n                description__icontains=keywords)\n\n    # For City Search\n    if 'city' in request.POST:\n        city = request.POST['city']\n        if city:\n            queryset_list = queryset_list.filter(city__iexact=city)\n\n    # For State filtering\n    if 'state' in request.POST:\n        state = request.POST['state']\n        if state:\n            queryset_list = queryset_list.filter(state__iexact=state)\n\n    # For Bedrooms Filtering\n    if 'bedrooms' in request.POST:\n        bedrooms = request.POST['bedrooms']\n        if bedrooms:\n            queryset_list = queryset_list.filter(bedrooms__lte=bedrooms)\n\n    # For Price Filtering\n    if 'price' in request.POST:\n        price = request.POST['price']\n        if price:\n            queryset_list = 
queryset_list.filter(price__lte=price)\n\n context = {\n 'states': choices_state,\n 'prices': choice_price,\n 'bedrooms': choice_bedrooms,\n 'listings': queryset_list,\n 'values': request.POST\n }\n return render(request, 'listings/search.html', context)\n","sub_path":"listings/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"641702873","text":"'''\nA newly designed keypad was tested, where a tester pressed a sequence of n keys, one at a time.\n\nYou are given a string keysPressed of length n, where keysPressed[i] was the ith key pressed in the testing sequence, and a sorted list releaseTimes, where releaseTimes[i] was the time the ith key was released. Both arrays are 0-indexed. The 0th key was pressed at the time 0, and every subsequent key was pressed at the exact time the previous key was released.\n\nThe tester wants to know the key of the keypress that had the longest duration. The ith keypress had a duration of releaseTimes[i] - releaseTimes[i - 1], and the 0th keypress had a duration of releaseTimes[0].\n\nNote that the same key could have been pressed multiple times during the test, and these multiple presses of the same key may not have had the same duration.\n\nReturn the key of the keypress that had the longest duration. If there are multiple such keypresses, return the lexicographically largest key of the keypresses.\n\nInput: releaseTimes = [9,29,49,50], keysPressed = \"cbcd\"\nOutput: \"c\"\nExplanation: The keypresses were as follows:\nKeypress for 'c' had a duration of 9 (pressed at time 0 and released at time 9).\nKeypress for 'b' had a duration of 29 - 9 = 20 (pressed at time 9 right after the release of the previous character and released at time 29).\nKeypress for 'c' had a duration of 49 - 29 = 20 (pressed at time 29 right after the release of the previous character and released at time 49).\nKeypress for 'd' had a duration of 50 - 49 = 1 (pressed at time 49 right after the release of the previous character and released at time 50).\nThe longest of these was the keypress for 'b' and the second keypress for 'c', both with duration 20.\n'c' is lexicographically larger than 'b', so the answer is 'c'.\n'''\n\ndef slowestKey(releaseTimes: list([int]), keysPressed: str) -> str:\n max_press = releaseTimes[0]\n max_press_index = [0]\n for rt in range(1,len(releaseTimes)):\n cur_press = releaseTimes[rt] - releaseTimes[rt-1]\n if max_press < cur_press:\n max_press = cur_press\n max_press_index = [rt]\n elif max_press == cur_press:\n max_press_index.append(rt)\n if len(max_press_index) == 1:\n return keysPressed[max_press_index[0]]\n else:\n max_key = keysPressed[max_press_index[0]]\n for ind in range(1,len(max_press_index)):\n if max_key < keysPressed[max_press_index[ind]]:\n max_key = keysPressed[max_press_index[ind]]\n return max_key\n\nprint(slowestKey([9,29,49,50],\"cbcd\"))\n\n","sub_path":"array/slowest_key.py","file_name":"slowest_key.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"121902012","text":"#!/usr/bin/env python\n# coding: utf-8\nimport math\n\nimport numpy as np\n\nfrom tools.dataset import PtData\nfrom tools.loss_grad import LossGradFunEval, LossAndGradients\n\n\nclass MultiClassLogisticRegressionLossGradFunEval(LossGradFunEval):\n\n def is_differential(self) -> bool:\n return True\n\n def _ptwise_loss_and_grad(self, param, data: 
PtData) -> LossAndGradients:\n        \"\"\"\n        return point wise loss and grad\n        for multi class logistic regression\n\n        C: number of classes\n        param \\in R^{C \\times dim(x)} (np.array in shape (C x dim(x))\n        \"\"\"\n\n        x = data.input\n        y = data.target # in [0, 1, 2, ..., C]\n\n        C, x_dim = param.shape\n\n        logits = np.dot(param, x)\n        # print(logits)\n        # print(logits.shape)\n        log_sum_exp = math.log(np.sum(np.exp(logits)))\n        loss = - logits[y] + log_sum_exp\n\n        grad = []\n        for i in range(C):\n            post = math.exp(logits[i] - log_sum_exp)\n            if i == y:\n                grad_per_class = x * (post - 1.0)\n                grad.append(grad_per_class)\n            else:\n                grad_per_class = x * post\n                grad.append(grad_per_class)\n\n        grad = np.array(grad)\n        return LossAndGradients(loss, grad)","sub_path":"tools/multiclass/multiclass_logistic_loss_grad.py","file_name":"multiclass_logistic_loss_grad.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"167736034","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('akvant', '0006_entry_created_at'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='course',\n            name='needs',\n            field=models.CharField(max_length=255, default=''),\n            preserve_default=False,\n        ),\n    ]\n","sub_path":"akvant/migrations/0007_course_needs.py","file_name":"0007_course_needs.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"360985722","text":"def VersusEnableTimeScheduleParam(json):\n    this = {}  # VersusEnableTimeScheduleParam(json)\n    if 'begin_time' in json:\n        this['mBegin'] = json['begin_time']\n    if 'open_time' in json:\n        this['mOpen'] = json['open_time']\n    if 'quest_iname' in json:\n        this['mQuestIname'] = json['quest_iname']\n    #try\n    #if(json.add_date!=null)\n    #this.mAddDateList=newList()\n    #for(intindex=0index]\n    [--flash=<bool>]\n    [--start=<bool>]\n    [--pause=<bool>]\n    [--boot=<bool>]\n    arduino_launch_talker.py (-h | --help)\n    arduino_launch_talker.py --version\n\nOptions:\n    -h, --help show help\n    --version show version\n    --rate=N ROS message frequency (in Hz)\n    [default: 1.00]\n    --topic=TOPIC Topic name for ROS publisher\n    [default: /Arduino/commands]\n    --gps_time=<t2:t3:t4> GPS time to send\n    --flash=BOOL set LEDs state\n    --start=BOOL set Start state\n    --pause=BOOL set Pause state\n    --boot=BOOL set boot state\n\"\"\"\n\nfrom docopt import docopt\ntry:\n    from schema import Schema, And, Or, Use, SchemaError\nexcept ImportError:\n    exit('This example requires that `schema` data-validation library'\n         ' is installed: \\n    pip install schema\\n'\n         'https://github.com/halst/schema')\n\nimport rospy\n# from sbg_driver.msg import gps\n# from ros_arduino.msg import commands\nfrom arduino_msgs.msg import commands\n\n\n_convert_to_bool = {\n    'true': True,\n    '1': True,\n    'on': True,\n    'false': False,\n    '0': False,\n    'off': False\n}\n\n_keys_for_booleans = ('true', 'false', '1', '0', 'on', 'off')\n\n\ndef talker(**kwargs):\n    topic = kwargs.get('--topic', '/Arduino/commands')\n    pub = rospy.Publisher(topic, commands, queue_size=2)\n\n    rospy.init_node('custom_talker', anonymous=True)\n\n    msg = commands()\n\n    # GPS time\n    gps_time_from_args = kwargs['--gps_time']\n    update_clock = gps_time_from_args is not None\n    print(\"update_clock: %s\" % update_clock)\n    msg.update_clock = update_clock\n    # TODO: the parameters may need to be swizzled (reordered)\n    msg.t2_t3_t4 = 
gps_time_from_args if update_clock else [0] * 3\n\n    # States\n    msg.state_flash = kwargs['--flash'] if kwargs['--flash'] else False\n    msg.state_start = kwargs['--start'] if kwargs['--start'] else False\n    msg.state_pause = kwargs['--pause'] if kwargs['--pause'] else False\n    msg.state_boot = kwargs['--boot'] if kwargs['--boot'] else False\n\n    #\n    r = rospy.Rate(kwargs.get(\"--rate\", 1.0))\n    while not rospy.is_shutdown():\n        rospy.loginfo(msg)\n        pub.publish(msg)\n        r.sleep()\n\n\nif __name__ == '__main__':\n    args = docopt(\n        __doc__,\n        version='1.0.0'\n    )\n\n    # TODO: we should check that the topic passed as an argument\n    # is an active ROS topic (with a subscriber).\n    # urls:\n    # - https://github.com/docopt/docopt/issues/52\n    schema_for_boolean = Or(None,\n                            And(\n                                lambda n: n.lower() in _keys_for_booleans,\n                                Use(lambda l: _convert_to_bool[l.lower()])\n                            ),\n                            error='--flash=BOOL should be boolean.')\n    schema = Schema({\n        '--help': Or(None, And(Use(bool), lambda n: True)),\n        '--version': Or(None, And(Use(bool), lambda n: True)),\n        '--rate': Or(None, And(Use(float), lambda n: 0.0 <= n < 100.0),\n                     error='--rate=N should be float 0.0 <= N < 100.0'),\n        '--topic': Or(None, And(Use(str), lambda n: True),\n                      error='--topic=TOPIC should be string of ROS topic'),\n        '--gps_time': Or(None, And(Use(lambda s: list(map(int, s.split(':')))),  # list() so len() works on Python 3\n                                   lambda l: len(l) == 3),\n                         error='--gps_time=<t2:t3:t4>'),\n        '--flash': schema_for_boolean,\n        '--start': schema_for_boolean,\n        '--pause': schema_for_boolean,\n        '--boot': schema_for_boolean,\n    }\n    )\n    try:\n        args = schema.validate(args)\n    except SchemaError as e:\n        exit(e)\n\n    print(\"args: %s\" % args)\n\n    try:\n        talker(**args)\n    except rospy.ROSInterruptException:\n        pass\n","sub_path":"project/pipeline/project1/arduino_launch_talker.py","file_name":"arduino_launch_talker.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"124046970","text":"import sklearn\nimport sklearn.model_selection  # explicit import: the submodule is not loaded by `import sklearn` alone\nfrom sklearn import datasets\nfrom sklearn import svm\nfrom sklearn import metrics\nfrom sklearn.neighbors import KNeighborsClassifier\n\ncancer = datasets.load_breast_cancer()\nprint(cancer.feature_names)\nprint(cancer.target_names)\n\nx = cancer.data\ny= cancer.target\nx_train,x_test,y_train, y_test = sklearn.model_selection.train_test_split(x, y, test_size=0.2)\n\nclasses = ['malignant', 'benign']\n\n# kernel and soft margin taken as parameters; C is the soft-margin penalty\nclf = svm.SVC(kernel=\"linear\", C=2)\n\nclf.fit(x_train, y_train)\n\npredictions = clf.predict(x_test)\n\nacc = metrics.accuracy_score(y_test, predictions)\n\nprint(acc)\n\n#Takes in the amount of neighbors\nmodel = KNeighborsClassifier(13)\nmodel.fit(x_train, y_train)\nacc = model.score(x_test, y_test)\nprint(acc)\n","sub_path":"SVM/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"124518326","text":"from scarches.models import CVAE\n\n\nclass scArchesNB(CVAE):\n    \"\"\"\n    scArches network with NB (negative binomial) loss. This class contains the implementation of the scNet network.\n\n    Parameters\n    ----------\n    x_dimension: int\n        number of gene expression space dimensions.\n    conditions: list\n        list of unique conditions (i.e. 
batch ids) in the data used for one-hot encoding.\n    z_dimension: int\n        number of latent space dimensions.\n    task_name: str\n        name of the task.\n\n    kwargs:\n        `learning_rate`: float\n            scNet's optimizer's step size (learning rate).\n        `alpha`: float\n            KL divergence coefficient in the loss function.\n        `eta`: float\n            Reconstruction coefficient in the loss function.\n        `dropout_rate`: float\n            dropout rate for Dropout layers in scNet's architecture.\n        `model_path`: str\n            path to save model config and its weights.\n        `clip_value`: float\n            Optimizer's clip value used for clipping the computed gradients.\n        `output_activation`: str\n            Output activation of scNet, which depends on the range of the data.\n        `use_batchnorm`: bool\n            Whether to use batch normalization in scNet or not.\n        `architecture`: list\n            Architecture of scNet. Must be a list of integers.\n        `gene_names`: list\n            names of genes fed as scNet's input. Must be a list of strings.\n    \"\"\"\n\n    def __init__(self, x_dimension, conditions, task_name=\"unknown\", z_dimension=10, **kwargs):\n        kwargs.update({'loss_fn': 'nb', 'beta': 0,\n                       \"model_name\": \"cvae_nb\", \"class_name\": \"scArchesNB\"})\n        super().__init__(x_dimension, conditions, task_name, z_dimension, **kwargs)\n\n\n    @classmethod\n    def from_config(cls, config_path, new_params=None, compile=True, construct=True):\n        \"\"\"create a ``CVAE_NB`` object from an existing ``CVAE_NB`` config file.\n\n        Parameters\n        ----------\n        config_path: str\n            Path to class' config json file.\n        new_params: dict, optional\n            Python dict of parameters to override with new values.\n        compile: bool\n            ``True`` by default. if ``True``, will compile class' model after creating an instance.\n        construct: bool\n            ``True`` by default. if ``True``, will construct class' model after creating an instance.\n        \"\"\"\n        import json\n        with open(config_path, 'rb') as f:\n            class_config = json.load(f)\n\n        class_config['construct_model'] = construct\n        class_config['compile_model'] = compile\n\n        if new_params:\n            class_config.update(new_params)\n\n        return cls(**class_config)\n","sub_path":"scarches/models/scarchesnb.py","file_name":"scarchesnb.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"329382723","text":"# -*- coding: utf-8 -*-\n\"\"\"\nFor 'layered' aquifer.\n\nDistance to the river will be taken from the numbering of the observation\npoints. Results will be saved in a results.csv. 
This has to be combined\nafterwards for postprocessing.\nFor the polyline at location == aquifer length the diffusivity is derived\nwith the spectral analysis of the baseflow.\n\nParameters\n----------\n\naquifer_length\naquifer_thickness\nwhich\ncomment\n# the number of the curve (1st == 1)\nrecharge_rfd\n\npath_to_multiple_projects as first argument\nnumber of cores as second argument\n\n\"\"\"\n# ------------------------------------------------------------------------------\n# python 2 and 3 compatible\nfrom __future__ import division\n# ------------------------------------------------------------------------------\n\n# import modules\nimport time\nimport sys\nimport numpy as np\nimport os\nimport pandas as pd\nimport os.path\nfrom mpi4py import MPI\nfrom scipy.stats import hmean,gmean\nfrom scipy.optimize import OptimizeWarning  # needed by the except clauses below\n\n\n# add search path for own modules\nsys.path.append(\"/Users/houben/PhD/python/scripts/spectral_analysis\")\n# add search path for own modules on eve\nsys.path.append(\"/home/houben/python/scripts/spectral_analysis\")\n\n# own modules\nfrom transect_plot import extract_timeseries, plot_head_timeseries_vs_recharge, extract_rfd\nfrom calc_tc import calc_tc\nfrom processing import *\nfrom power_spectrum import power_spectrum\nfrom plot_power_spectra import plot_spectrum\nfrom get_obs import get_obs\nfrom get_ogs_parameters import get_ogs_parameters, get_kf_from_blocks\nfrom shh_analytical import shh_analytical_fit, shh_analytical\nfrom plot_fitting_results import plot_errors_vs_loc_hetero, plot_parameter_vs_location\nfrom tools import get_ogs_folders\nfrom calculate_flow import plot_recharge_vs_baseflow, get_baseflow_from_polyline\nfrom tools import get_ogs_task_id\nfrom transfer_functions import discharge_ftf_fit, discharge_ftf\n# ------------------------------------------------------------------------------\n# set some parameters for the analysis manually\n# ------------------------------------------------------------------------------\naquifer_length = 1000\naquifer_thickness = 30\nwhich = \"mean\"\n# how many layers\nnumber_of_blocks = 30\n# convergence criterion: Series of Shh analytical will be truncated when next\n# iteration adds less than this relative value\nconvergence = 0.1\n# the number of the curve (1st == 1)\nrecharge_rfd = 1\n# m and n are only taken into account if shh_analytical_man is used. shh_analytical\n# also has m and n as arguments but is not using them.\nm = None\nn = None\ncomment = \"1_\" # give a specific comment for the analysis e.g. \"parameterset1_\"\n# set cut index and limit recharge and head time series to the first #cut_index values\n# set it to None to take all values\ncut_index = None\n# plot the power spectrum normalized by recharge or not\nnorm = False\n# ------------------------------------------------------------------------------\n# some parameters for the mpi run\n# ------------------------------------------------------------------------------\n# get the number of slots from a system argument\nslots = int(sys.argv[2])\ncomm = MPI.COMM_WORLD\nsize = comm.Get_size()\nrank = comm.Get_rank()\n\n\"\"\"\nDescription:\n\n- Time series and all necessary parameters will be loaded from the ogs files\n  and stored in arrays.\n- kf values will be taken from the mmp file and the means will be stored in the dataframe.\n- preprocessing on time series should be considered? 
Detrending?\n- power spectrum will be calculated\n- fit of power spectrum and parameters will be stored in results.csv.\n\nRequirements\n------------\n- obs_points should be formatted like the following: 'obs_00100' with x = 100\n- Recharge time series must be stored in: rfd_curve#1_y_values.txt !!!!\n- Ensure that there is no other folder in the directory except for the OGS model runs.\n\nYields\n------\ndataframe : n_observation_points x n_parameters\n    name : Name of OGS model run (project_folder)\n    S_in : input storativity\n    T_in_ari : input transmissivity\n    T_in_geo : input transmissivity\n    T_in_har : input transmissivity\n    D_in_ari : input Diffusivity\n    D_in_geo : input Diffusivity\n    D_in_har : input Diffusivity\n    S_out : output storativity from shh_analytical_fit\n    T_out : output transmissivity from shh_analytical_fit\n    tc_out : output characteristic time scale calculated from T_out and S_out\n    cov : covariance matrix of fit\n    obs_loc : location of the observation point. loc = 0 : water divide\n    time_step_size : size of time step in seconds [s]\n    time_steps : number of time steps\n    model_period : Modelling period in days [d]\n    which : Screening depth of observation point. \"mean\", \"min\", \"max\"\n    recharge : type of recharge\n    aquifer_length : aquifer_length\n    aquifer_thickness : aquifer_thickness\n    D_out : derived Diffusivity from SA of baseflow\n    D_cov : Covariance of fit of Diffusivity\n\"\"\"\n\n# specify the path to the parent directory of multiple OGS model runs\ntry:\n    path_to_multiple_projects = sys.argv[1]\nexcept IndexError:\n    print(\"You forgot to give the path to multiple projects as argument...\")\n    path_to_multiple_projects = input(\"Insert path to multiple projects: \")\n\n# get a list of all directories containing OGS model runs\nproject_folder_list = get_ogs_folders(path_to_multiple_projects)\n\n# remove folder \"fitting_results\" from list and sort\ntry:\n    project_folder_list.remove(\"fitting_results\")\nexcept ValueError:\n    pass\nproject_folder_list.sort()\n\n# initiate the dataframe\npd.set_option(\"precision\", 10)\ncolumns = [\n    \"name\",\n    \"S_in\",\n    \"T_in_ari\",\n    \"T_in_geo\",\n    \"T_in_har\",\n    \"D_in_ari\",\n    \"D_in_geo\",\n    \"D_in_har\",\n    \"T_out\",\n    \"S_out\",\n    \"tc_out\",\n    \"cov\",\n    \"obs_loc\",\n    \"time_step_size\",\n    \"time_steps\",\n    \"model_period\",\n    \"which\",\n    \"recharge\",\n    \"aquifer_length\",\n    \"aquifer_thickness\",\n    \"D_out\",\n    \"D_cov\",\n]\n\n# outer loop over all project_folders containing OGS model runs\nfor i, project_folder in enumerate(project_folder_list):\n    if i%slots == rank:\n        time_1_folder_begin = time.time()\n        # initialize the dataframe\n        results = pd.DataFrame(columns=columns)\n        print(\"###################################################################\")\n        print(\"Starting spectral analysis for folder \" + project_folder + \" on rank \" + str(rank))\n        print(\"###################################################################\")\n        path_to_project = path_to_multiple_projects + \"/\" + project_folder\n        # get list of observation points in current project_folder\n        obs_point_list = get_obs(path_to_project, without_max=False)[1]\n        obs_loc_list = get_obs(path_to_project, without_max=False)[2]\n        # check if time series for different observation points have already been extracted\n        checker = []\n        for item in obs_point_list:\n            if os.path.exists(str(path_to_project) + \"/\" + \"head_ogs_\" + str(item) + \"_\" + str(which) + \".txt\"):\n                checker.append(True)\n            else:\n                checker.append(False)\n        if all(checker) and checker 
!= []:\n            print(\"All time series have already been extracted. Continuing without checking if content is correct.\")\n        else:\n            # extract the time series from the tec files\n            print(\"Extracting time series...\")\n            extract_timeseries(path_to_project, which=\"mean\", process=\"GROUNDWATER_FLOW\")\n        # extract the rfd curve\n        time_time_series, recharge_time_series = extract_rfd(path=path_to_project, rfd=recharge_rfd)\n        # plot the time series vs recharge\n        plot_head_timeseries_vs_recharge(path=path_to_project)\n        # get a list of kf values from the blocks in the .mmp-file\n        kf_list = get_kf_from_blocks(path_to_project,number_of_blocks)\n        kf_in_ari, kf_in_geo, kf_in_har = np.mean(kf_list), gmean(kf_list), hmean(kf_list)\n        T_in_ari, T_in_geo, T_in_har = kf_in_ari * aquifer_thickness, kf_in_geo*aquifer_thickness, kf_in_har*aquifer_thickness\n        # write OGS input parameters in DataFrame, but don't return kf because it is distributed\n        Ss_in, time_step_size, time_steps = get_ogs_parameters(path_to_project, noKf=True)\n        S_in = Ss_in * aquifer_thickness\n        # make directory for results\n        path_to_results = (\n            path_to_multiple_projects + \"/\" + project_folder + \"/\" + \"spectral_analysis\"\n        )\n        if not os.path.exists(path_to_results):\n            os.mkdir(path_to_results)\n        # inner loop over all observation points of current OGS model run\n        # change the order of the lists\n        #myorder = np.arange(-len(obs_point_list)+1,1)*-1\n        #obs_point_list = [obs_point_list[i] for i in myorder]\n        #obs_loc_list = [obs_loc_list[i] for i in myorder]\n        for j, (obs_point, obs_loc) in enumerate(zip(obs_point_list, obs_loc_list)):\n            print(\"###################################################################\")\n            print(\"Project folder: \" + project_folder)\n            print(\"Observation point: \" + obs_point)\n            print(\"Observation point location: \" + str(obs_loc))\n            if obs_loc == aquifer_length:\n                print(\"Spectral analysis for the baseflow (not every functionality is considered (e.g. cut_index, norm))\")\n                # If the current observation point is equal to the aquifer\n                # length, it is assumed that this polyline-file contains the\n                # velocities to calculate the baseflow. 
First, the baseflow\n                # is calculated and afterwards, the diffusivity is derived\n                # with the spectral analysis.\n                task_id = get_ogs_task_id(path_to_project)\n                baseflow = get_baseflow_from_polyline(task_id, path_to_project, path_to_project + \"/\" + task_id + \"_ply_obs_01000_t\" + str(len(obs_point_list)) + \"_GROUNDWATER_FLOW.tec\")\n                # multiply the recharge time series with the aquifer length to get the total inflow\n                recharge = recharge_time_series * aquifer_length\n                try:\n                    D_out, D_cov, frequency, Sqq = discharge_ftf_fit(recharge, baseflow, time_step_size, aquifer_length)\n                except RuntimeError:\n                    print(\"Optimal parameters not found...\")\n                    # assign whole lists here: D_out/D_cov do not exist yet when the fit itself failed\n                    D_out, D_cov = [np.nan, np.nan], [[np.nan, np.nan],[np.nan, np.nan]]\n                    print(\"popt and pcov have been set to np.nan\")\n                except ValueError:\n                    print(\"either ydata or xdata contain NaNs, or incompatible options were used\")\n                    D_out, D_cov = [np.nan, np.nan], [[np.nan, np.nan],[np.nan, np.nan]]\n                except OptimizeWarning:\n                    print(\"Covariance of the parameters could not be estimated.\")\n                    #popt, pcov = [np.nan, np.nan], [[np.nan, np.nan],[np.nan, np.nan]]\n\n                # add values to dataframe\n                print(\"D: \", \"{0:.3e}\".format(D_out[0]))\n                print(\"Covariance of fit:\" + str(D_cov[0]))\n\n                # fill temporary dataframe for one model run\n                results_temp = {\n                    \"name\": project_folder,\n                    \"S_in\": S_in,\n                    \"T_in_ari\": T_in_ari,\n                    \"T_in_geo\": T_in_geo,\n                    \"T_in_har\": T_in_har,\n                    \"D_in_ari\": T_in_ari / S_in,\n                    \"D_in_geo\": T_in_geo / S_in,\n                    \"D_in_har\": T_in_har / S_in,\n                    \"T_out\": np.nan,\n                    \"S_out\": np.nan,\n                    \"tc_out\": np.nan,\n                    \"cov\": np.nan,\n                    \"obs_loc\": obs_loc,\n                    \"time_step_size\": time_step_size,\n                    \"time_steps\": time_steps,\n                    \"model_period\": time_step_size * time_steps / 86400,\n                    \"which\": which,\n                    \"recharge\": get_filename_from_rfd_top_com(path_to_project),\n                    \"aquifer_length\": aquifer_length,\n                    \"aquifer_thickness\": aquifer_thickness,\n                    \"D_out\": D_out[0],\n                    \"D_cov\": D_cov[0],\n                }\n\n                results = results.append(other=results_temp, ignore_index=True, sort=False)\n\n                # calculate the fitted power spectra\n                Sqq_fitted = discharge_ftf(frequency, D_out, aquifer_length)\n\n                Sqq_fitted = np.reshape(Sqq_fitted,(len(Sqq_fitted),))\n                Sqq = np.reshape(Sqq,(len(Sqq),))\n\n                data = np.vstack((Sqq, Sqq_fitted))\n\n                labels = [\n                    \"Sqq numerical\",\n                    \"Sqq fitted\"\n                ]\n\n                linestyle = [\"-\", \"-\"]\n                # lims = [(1e-9,6e-6),(1e-6,1e5)]\n                marker = [\"\", \"\"]\n                figtxt = \"OGS Input Parameter: S = %1.3e, D_ari = %1.3e, D_geo = %1.3e, D_har = %1.3e\" % (\n                    S_in,\n                    T_in_ari/S_in,\n                    T_in_geo/S_in,\n                    T_in_har/S_in\n                ) + \"\\nDerived Parameter: D = %1.3e, D_cov = %1.1e\" % (\n                    D_out[0],\n                    D_cov[0],\n                )\n\n                plot_spectrum(\n                    data,\n                    frequency,\n                    labels=labels,\n                    path=path_to_results,\n                    # lims=lims,\n                    linestyle=linestyle,\n                    marker=marker,\n                    heading=\"Folder: \" + project_folder + \"\\nLocation: \" + str(obs_loc),\n                    name=\"SA_\"\n                    + project_folder\n                    + \"_\"\n                    + str(obs_loc).zfill(len(str(aquifer_length)))\n                    + \"_baseflow\",\n                    figtxt=figtxt,\n                    comment=comment,\n                )\n                # break this iteration and continue with next obs point\n                continue\n            # load head time series\n            head_time_series = np.loadtxt(\n                path_to_multiple_projects\n                + \"/\"\n                + project_folder\n                + \"/\"\n                + \"head_ogs_\"\n                + obs_point\n                + \"_\"\n                + which\n                + \".txt\"\n            )\n            # do some preprocessing on time series\n            # ------------------------------------\n            # DETREND THE HEAD TIME SERIES?\n\n            # cut the time series of head and recharge at a given point\n            # only get the first cut_index values\n            
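            # note: with the default cut_index=None this slice (x[:None]) is a no-op and keeps the full series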
head_time_series = head_time_series[:cut_index]\n recharge_time_series = recharge_time_series[:cut_index]\n if cut_index != None:\n print(\n \"Time series have been cut. First \"\n + str(cut_index)\n + \" values remained.\"\n )\n # calculate the power spectrum: Shh/Sww, output/input to PLOT only!\n frequency_oi, Shh_Sww = power_spectrum(\n input=recharge_time_series,\n output=head_time_series,\n time_step_size=time_step_size,\n method=\"scipyffthalf\",\n o_i=\"oi\",\n )\n\n # calculate the power spectrum: Shh, output to FIT with analy solution only!\n frequency, Shh = power_spectrum(\n input=recharge_time_series,\n output=head_time_series,\n time_step_size=time_step_size,\n method=\"scipyffthalf\",\n o_i=\"o\",\n )\n frequency, Sww = power_spectrum(\n input=recharge_time_series,\n output=head_time_series,\n time_step_size=time_step_size,\n method=\"scipyffthalf\",\n o_i=\"i\",\n )\n # fit the power spectrum with the analytical solution\n try:\n popt, pcov = shh_analytical_fit(\n Sww=Sww,\n Shh=Shh,\n f=frequency,\n x=obs_loc,\n m=m,\n n=n,\n L=aquifer_length,\n norm=False,\n convergence=convergence,\n )\n except RuntimeError:\n print(\"Optimal parameters not found...\")\n popt, pcov = [np.nan, np.nan], [[np.nan, np.nan],[np.nan, np.nan]]\n print(\"popt and pcov have been set to np.nan\")\n except ValueError:\n print(\"either ydata or xdata contain NaNs, or if incompatible options are used\")\n popt, pcov = [np.nan, np.nan], [[np.nan, np.nan],[np.nan, np.nan]]\n except OptimizeWarning:\n print(\"Covariance of the parameters could not be estimated.\")\n #popt, pcov = [np.nan, np.nan], [[np.nan, np.nan],[np.nan, np.nan]]\n\n\n # absolute values for popt because T and S are squared in equation\n # of shh_anlytical and negative values are possible\n popt = [abs(i) for i in popt]\n # add values to dataframe\n print(\"S fit: \", \"{0:.3e}\".format(popt[0]))\n print(\"S input: \", \"{0:.3e}\".format(S_in))\n print(\"T fit: \", \"{0:.3e}\".format(popt[1]))\n print(\"T input ari: \", \"{0:.3e}\".format(T_in_ari))\n print(\"T input geo: \", \"{0:.3e}\".format(T_in_geo))\n print(\"T input har: \", \"{0:.3e}\".format(T_in_har))\n print(\"Covariance of fit:\" + str([i for i in pcov]))\n\n # fill temporal dataframe for one model run\n results_temp = {\n \"name\": project_folder,\n \"S_in\": S_in,\n \"T_in_ari\": T_in_ari,\n \"T_in_geo\": T_in_geo,\n \"T_in_har\": T_in_har,\n \"D_in_ari\": T_in_ari/S_in,\n \"D_in_geo\": T_in_geo/S_in,\n \"D_in_har\": T_in_har/S_in,\n \"T_out\": popt[1],\n \"S_out\": popt[0],\n \"tc_out\": calc_tc(aquifer_length, popt[0], popt[1]),\n \"cov\": pcov,\n \"obs_loc\": obs_loc,\n \"time_step_size\": time_step_size,\n \"time_steps\": time_steps,\n \"model_period\": time_step_size * time_steps / 86400,\n \"which\": which,\n \"recharge\": get_filename_from_rfd_top_com(path_to_project),\n \"aquifer_length\": aquifer_length,\n \"aquifer_thickness\": aquifer_thickness,\n \"D_out\": np.nan,\n \"D_cov\": np.nan,\n }\n\n results = results.append(other=results_temp, ignore_index=True, sort=False)\n\n # calculate the fitted power spectra\n Shh_fitted = shh_analytical(\n (frequency, Sww),\n popt[0],\n popt[1],\n obs_loc,\n aquifer_length,\n m=n,\n n=m,\n norm=norm,\n )\n\n if norm == True:\n data = np.vstack((Shh_Sww, Shh_fitted))\n elif norm == False:\n data = np.vstack((Shh, Shh_fitted))\n\n labels = [\n \"Shh numerical\",\n \"Shh fitted\"\n ]\n linestyle = [\"-\", \"-\"]\n # lims = [(1e-9,6e-6),(1e-6,1e5)]\n marker = [\"\", \"d\"]\n figtxt = \"OGS Input Parameter: S = %1.3e, T_ari = 
%1.3e, T_geo = %1.3e, T_har = %1.3e\" % (\n S_in,\n T_in_ari,\n T_in_geo,\n T_in_har,\n ) + \"\\nDerived Parameter: S = %1.3e, T = %1.3e\" % (\n popt[0],\n popt[1],\n )\n\n plot_spectrum(\n data,\n frequency,\n labels=labels,\n path=path_to_results,\n # lims=lims,\n linestyle=linestyle,\n marker=marker,\n heading=\"Folder: \" + project_folder + \"\\nLocation: \" + str(obs_loc),\n name=\"SA_\"\n + project_folder\n + \"_\"\n + str(obs_loc).zfill(len(str(aquifer_length))),\n figtxt=figtxt,\n comment=comment,\n )\n\n\n time_1_folder_end = time.time() - time_1_folder_begin\n print(\"Ready! \" + str(time_1_folder_end) + \" s elapsed for \" + project_folder + \"...\")\n # set path to results incl file name of results\n path_to_results_df = path_to_results + \"/\" + comment + \"results.csv\"\n # if os.path.isfile(path_to_results_df): # override = true, not necesarry\n results.to_csv(path_to_results_df)\n\n # a few lines to plot the error, variance of the fit also as an error plot, not working yet!!!\n #results[\"cov_numbers\"] = results[\"cov\"].apply(identify_numbers_from_string)\n #results[\"sigma_T\"] = results[\"cov_numbers\"].apply(lambda x: x[3] if x != [] else np.nan)\n #results[\"sigma_S\"] = results[\"cov_numbers\"].apply(lambda x: x[0] if x != [] else np.nan)\n\n plot_parameter_vs_location(path_to_results, results[\"T_out\"], obs_loc_list, y_label=\"T_out\")\n","sub_path":"spectral_analysis/20190917_spectral_analysis_mpi_layered.py","file_name":"20190917_spectral_analysis_mpi_layered.py","file_ext":"py","file_size_in_byte":21228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"567499742","text":"import os\nimport csv\nimport copy\nimport json\nimport torch\nimport numpy as np\nfrom tqdm import tqdm\nfrom os import path as osp\nfrom bootstrap.lib.logger import Logger\nfrom block.datasets.vqa_utils import AbstractVQA\nfrom copy import deepcopy\nimport random\nimport h5py\n\nclass VQACP2(AbstractVQA):\n\n def __init__(self,\n dir_data='data/vqa/vqacp2',\n split='train',\n batch_size=80,\n nb_threads=4,\n pin_memory=False,\n shuffle=False,\n nans=1000,\n minwcount=10,\n nlp='mcb',\n proc_split='train',\n samplingans=False,\n dir_rcnn='data/coco/extract_rcnn',\n dir_cnn=None,\n dir_vgg16=None,\n has_testdevset=False,\n ):\n super(VQACP2, self).__init__(\n dir_data=dir_data,\n split=split,\n batch_size=batch_size,\n nb_threads=nb_threads,\n pin_memory=pin_memory,\n shuffle=shuffle,\n nans=nans,\n minwcount=minwcount,\n nlp=nlp,\n proc_split=proc_split,\n samplingans=samplingans,\n has_valset=True,\n has_testset=False,\n has_testdevset=has_testdevset,\n has_testset_anno=False,\n has_answers_occurence=True,\n do_tokenize_answers=False)\n self.dir_rcnn = dir_rcnn\n self.dir_cnn = dir_cnn\n self.dir_vgg16 = dir_vgg16\n self.load_image_features()\n self.load_original_annotation = False\n\n def add_rcnn_to_item(self, item):\n path_rcnn = os.path.join(self.dir_rcnn, '{}.pth'.format(item['image_name']))\n item_rcnn = torch.load(path_rcnn)\n item['visual'] = item_rcnn['pooled_feat']\n item['coord'] = item_rcnn['rois']\n item['norm_coord'] = item_rcnn['norm_rois']\n item['nb_regions'] = item['visual'].size(0)\n return item\n\n def load_image_features(self):\n if self.dir_cnn:\n filename_train = os.path.join(self.dir_cnn, 'trainset.hdf5')\n filename_val = os.path.join(self.dir_cnn, 'valset.hdf5')\n Logger()(f\"Opening file {filename_train}, {filename_val}\")\n self.image_features_train = h5py.File(filename_train, 'r', swmr=True)\n 
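            # swmr=True (single-writer / multiple-reader) lets several worker processes read the HDF5 file concurrently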
self.image_features_val = h5py.File(filename_val, 'r', swmr=True)\n # load txt\n with open(os.path.join(self.dir_cnn, 'trainset.txt'.format(self.split)), 'r') as f:\n self.image_names_to_index_train = {}\n for i, line in enumerate(f):\n self.image_names_to_index_train[line.strip()] = i\n with open(os.path.join(self.dir_cnn, 'valset.txt'.format(self.split)), 'r') as f:\n self.image_names_to_index_val = {}\n for i, line in enumerate(f):\n self.image_names_to_index_val[line.strip()] = i\n elif self.dir_vgg16:\n # list filenames\n self.filenames_train = os.listdir(os.path.join(self.dir_vgg16, 'train'))\n self.filenames_val = os.listdir(os.path.join(self.dir_vgg16, 'val'))\n\n\n def add_vgg_to_item(self, item):\n image_name = item['image_name']\n filename = image_name + '.pth'\n if filename in self.filenames_train:\n path = os.path.join(self.dir_vgg16, 'train', filename)\n elif filename in self.filenames_val:\n path = os.path.join(self.dir_vgg16, 'val', filename)\n visual = torch.load(path)\n visual = visual.permute(1, 2, 0).view(14*14, 512)\n item['visual'] = visual\n return item\n\n def add_cnn_to_item(self, item):\n image_name = item['image_name']\n if image_name in self.image_names_to_index_train:\n index = self.image_names_to_index_train[image_name]\n image = torch.tensor(self.image_features_train['att'][index])\n elif image_name in self.image_names_to_index_val:\n index = self.image_names_to_index_val[image_name]\n image = torch.tensor(self.image_features_val['att'][index])\n image = image.permute(1, 2, 0).view(196, 2048)\n item['visual'] = image\n return item\n\n def __getitem__(self, index):\n item = {}\n item['index'] = index\n\n # Process Question (word token)\n question = self.dataset['questions'][index]\n if self.load_original_annotation:\n item['original_question'] = question\n item['question_id'] = question['question_id']\n item['question'] = torch.LongTensor(question['question_wids'])\n item['lengths'] = torch.LongTensor([len(question['question_wids'])])\n item['image_name'] = question['image_name']\n\n # Process Object, Attribut and Relational features\n if self.dir_rcnn:\n item = self.add_rcnn_to_item(item)\n elif self.dir_cnn:\n item = self.add_cnn_to_item(item)\n elif self.dir_vgg16:\n item = self.add_vgg_to_item(item)\n\n # Process Answer if exists\n if 'annotations' in self.dataset:\n annotation = self.dataset['annotations'][index]\n if self.load_original_annotation:\n item['original_annotation'] = annotation\n if 'train' in self.split and self.samplingans:\n proba = annotation['answers_count']\n proba = proba / np.sum(proba)\n item['answer_id'] = int(np.random.choice(annotation['answers_id'], p=proba))\n else:\n item['answer_id'] = annotation['answer_id']\n item['class_id'] = torch.LongTensor([item['answer_id']])\n item['answer'] = annotation['answer']\n item['question_type'] = annotation['question_type']\n\n return item\n\n def download(self):\n dir_ann = osp.join(self.dir_raw, 'annotations')\n os.system('mkdir -p '+dir_ann)\n os.system('wget https://computing.ece.vt.edu/~aish/vqacp/vqacp_v2_train_questions.json -P' + dir_ann)\n os.system('wget https://computing.ece.vt.edu/~aish/vqacp/vqacp_v2_test_questions.json -P' + dir_ann)\n os.system('wget https://computing.ece.vt.edu/~aish/vqacp/vqacp_v2_train_annotations.json -P' + dir_ann)\n os.system('wget https://computing.ece.vt.edu/~aish/vqacp/vqacp_v2_test_annotations.json -P' + dir_ann)\n train_q = {\"questions\":json.load(open(osp.join(dir_ann, \"vqacp_v2_train_questions.json\")))}\n val_q = 
{\"questions\":json.load(open(osp.join(dir_ann, \"vqacp_v2_test_questions.json\")))}\n train_ann = {\"annotations\":json.load(open(osp.join(dir_ann, \"vqacp_v2_train_annotations.json\")))}\n val_ann = {\"annotations\":json.load(open(osp.join(dir_ann, \"vqacp_v2_test_annotations.json\")))}\n train_q['info'] = {}\n train_q['data_type'] = 'mscoco'\n train_q['data_subtype'] = \"train2014cp\"\n train_q['task_type'] = \"Open-Ended\"\n train_q['license'] = {}\n val_q['info'] = {}\n val_q['data_type'] = 'mscoco'\n val_q['data_subtype'] = \"val2014cp\"\n val_q['task_type'] = \"Open-Ended\"\n val_q['license'] = {}\n for k in [\"info\", 'data_type','data_subtype', 'license']:\n train_ann[k] = train_q[k]\n val_ann[k] = val_q[k]\n with open(osp.join(dir_ann, \"OpenEnded_mscoco_train2014_questions.json\"), 'w') as F:\n F.write(json.dumps(train_q))\n with open(osp.join(dir_ann, \"OpenEnded_mscoco_val2014_questions.json\"), 'w') as F:\n F.write(json.dumps(val_q))\n with open(osp.join(dir_ann, \"mscoco_train2014_annotations.json\"), 'w') as F:\n F.write(json.dumps(train_ann))\n with open(osp.join(dir_ann, \"mscoco_val2014_annotations.json\"), 'w') as F:\n F.write(json.dumps(val_ann))\n\n def add_image_names(self, dataset):\n for q in dataset['questions']:\n q['image_name'] = 'COCO_%s_%012d.jpg'%(q['coco_split'],q['image_id'])\n return dataset\n\n","sub_path":"rubi/datasets/vqacp2.py","file_name":"vqacp2.py","file_ext":"py","file_size_in_byte":7954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"452703458","text":"\"\"\"\nThis is a dumb calculator that can add and subtract whole numbers from zero to five.\nWhen you run the code, you are prompted to enter two numbers (in the form of English\nword instead of number) and the operator sign (also in the form of English word).\nThe code will perform the calculation and give the result if your input is what it\nexpects.\n\nThe code is very long and messy. Refactor it according to what you have learned about\ncode simplicity and efficiency.\n\"\"\"\n\nprint('''Welcome to this calculator!\n It can add and subtract whole numbers from zero to five''')\na = input('Please choose your first number (zero to five): ')\nb = input('What do you want to do? plus or minus: ')\nc = input('Please choose your second number (zero to five): ')\n\ncheck_list = ['zero', 'one', 'two', 'three', 'four', 'five', 'plus', 'minus']\nsum_dict = {'zero': 0, 'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 1: 'one', 2: 'two', 3: 'three', 4: 'four',\n 5: 'five', 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine', 10: 'ten', -1: 'negative one', -2: 'negative two',\n -3: 'negative three', -4: 'negative four', -5: 'negative five'}\n\n\ndef calculate(number1, operator, number2):\n result = \"\"\n if operator == 'plus':\n result = sum_dict[sum_dict[number1] + sum_dict[number2]]\n elif operator == 'minus':\n result = sum_dict[sum_dict[number1] - sum_dict[number2]]\n else:\n print('Error. You should select a correct operator.')\n print(f'{number1} {operator} {number2} equals {result}')\n\n\nif (a not in check_list) or (b not in check_list) or (c not in check_list):\n print(\"I am not able to answer this question. 
Check your input.\")\nelse:\n calculate(a, b, c)\nprint(\"Thanks for using this calculator, goodbye :)\")\n","sub_path":"your-code/challenge-1.py","file_name":"challenge-1.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"256767851","text":"\"\"\" Custom data generators\"\"\"\n\nimport random\nimport multiprocessing as mp\nfrom itertools import islice, zip_longest\nimport numpy as np\nimport tensorflow_hub as hub\nimport tensorflow as tf\n\nMASK_TOKEN_ID = 0 # try 0 first\nELMO_TF_HUB_URL = 'https://tfhub.dev/google/elmo/2'\n\n\ndef chunker(chunked_size, iterable):\n \"\"\"breaks up the iterable into chunks of size chunked_size\"\"\"\n return zip_longest(*[iter(iterable)]*chunked_size)\n\n\nclass ELMoTFHubGenerator:\n \"\"\"An attempt at wrapping a generator around ELMo TF Hub calls\n Runs on CPU - in practice, since it's running an actual TF graph, too slow to be useable\n \"\"\"\n def __init__(self, data_x, data_y,\n batch_size, shuffle=False,\n num_processes=4):\n self.data_x = data_x\n self.data_y = data_y\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.num_processes = num_processes\n\n def elmo_producer(self,\n in_queue: mp.Queue,\n out_queue: mp.Queue,\n terminate_event: mp.Event):\n \"\"\" Producer called by MP to generate ELMO embeddings from tf hub\"\"\"\n import os\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n elmo = hub.Module(ELMO_TF_HUB_URL, trainable=False)\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True # pylint: disable=no-member\n\n with tf.Session(config=config) as session:\n session.run(tf.global_variables_initializer())\n session.run(tf.tables_initializer())\n\n while not terminate_event.is_set():\n batch_idx = list(in_queue.get(True, timeout=2))\n batch_comments = self.data_x[batch_idx]\n\n batch_embeddings = session.run(elmo(batch_comments,\n signature='default',\n as_dict=True)['elmo'])\n\n if self.data_y is None:\n out_queue.put(batch_embeddings)\n else:\n batch_y = self.data_y[batch_idx]\n out_queue.put((batch_embeddings, batch_y))\n\n def batch_generator(self):\n \"\"\" creates an MP pool to call ELMO tf hub and generate the embeddings\"\"\"\n idx_queue = mp.Queue()\n embeddings_queue = mp.Queue(maxsize=self.num_processes * 2)\n terminate_event = mp.Event()\n pool = mp.Pool(self.num_processes,\n initializer=self.elmo_producer,\n initargs=(idx_queue, embeddings_queue, terminate_event))\n\n try:\n while True:\n idx_list = list(range(len(self.data_x)))\n if self.shuffle:\n random.shuffle(idx_list)\n batched_idx = list(chunker(self.batch_size, idx_list))\n for batch_idx in batched_idx:\n idx_queue.put(batch_idx)\n\n for _ in range(len(batched_idx)):\n yield embeddings_queue.get()\n finally:\n terminate_event.set()\n pool.close()\n pool.join()\n\n\nclass MLMBatchGenerator:\n \"\"\" Creates batches of MLM samples specifically for Toxic\"\"\"\n def __init__(self, data_x, data_y,\n batch_size, last_token_id):\n self.data_x = data_x\n self.data_y = data_y\n self.batch_size = batch_size\n self.last_token_id = last_token_id\n\n def batch_generator(self):\n \"\"\"\n yields 1) the masked sequence of token ids as a feature,\n 2) tuple of the toxic classifier targets,\n and the original token id sequence with positions of masking\n \"\"\"\n samples = self.generate_samples()\n while True:\n next_bunch_of_samples = islice(samples, self.batch_size)\n mask, sequence, masked_sequence, classifier_targets 
= zip(*list(next_bunch_of_samples))\n            combined_label = np.stack([sequence, mask], axis=-1)\n            yield (np.array(masked_sequence), [np.array(classifier_targets), combined_label])\n\n    def generate_samples(self):\n        \"\"\"\n        Masks the sequence using odds given by the paper - 15% masking,\n        of which 80% are masked with the MASK token,\n        10% are flipped to another token\n        10% left as the original token\n        \"\"\"\n        while True:\n            idx_list = list(range(len(self.data_x)))\n            random.shuffle(idx_list)\n\n            for curr_idx in idx_list:\n                sequence = self.data_x[curr_idx]\n                masked_sequence = sequence.copy()\n                output_mask = np.zeros((len(sequence),), dtype=int)\n                # since we're applying padding, need to constrain range of masking\n                try:\n                    start_idx = np.flatnonzero(sequence)[0]\n                except IndexError: # all zeros - due to tokenizing on cut-down vocab\n                    continue\n                for word_pos in range(start_idx, len(sequence)):\n                    if random.random() < 0.15:\n                        dice = random.random()\n                        if dice < 0.8: # 80% of 15% = mask\n                            masked_sequence[word_pos] = MASK_TOKEN_ID\n                        elif dice < 0.9: # 10% of 15% = random\n                            masked_sequence[word_pos] = random.randint(\n                                1, self.last_token_id)\n                        # else: 10% of 15% just leave the word as is\n                        output_mask[word_pos] = 1\n                yield (output_mask, sequence, masked_sequence, self.data_y[curr_idx])\n","sub_path":"data_generators.py","file_name":"data_generators.py","file_ext":"py","file_size_in_byte":5699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"264371573","text":"from django.shortcuts import render, redirect\r\nfrom django.contrib import messages\r\nfrom django.core.mail import send_mail\r\nfrom django.urls import reverse_lazy\r\nfrom .models import Contact\r\n# Create your views here.\r\n\r\n\r\ndef contact(request):\r\n    if request.method == 'POST':\r\n        name = request.POST.get('name')\r\n        client_email = request.POST.get('email')\r\n        subject = request.POST.get('subject')\r\n        message = request.POST.get('message')\r\n        redirect_url = request.POST.get('redirect')\r\n        print(redirect_url)\r\n\r\n        contact = Contact(name=name, subject=subject,\r\n                          email=client_email, message=message)\r\n        contact.save()\r\n\r\n        # Send email to support team\r\n        send_mail(\r\n            'Client Inquiry',\r\n            'There has been an inquiry regarding DANGUI STAMP MAKERS. Sign into the admin panel for more info.',\r\n            'danguistamp@gmail.com',\r\n            ['azizrahman.yarzai0@gmail.com'],\r\n            fail_silently=False\r\n        )\r\n\r\n        # Send email to customer\r\n        send_mail(\r\n            'Reply to your inquiry',\r\n            'Thank you ' + name + ' for your inquiry. 
Our support team will reach you soon.',\r\n 'danguistamp@gmail.com',\r\n [client_email],\r\n fail_silently=False\r\n )\r\n\r\n messages.success(\r\n request, 'Your request has been submitted, our support team will get back to you soon')\r\n return redirect(redirect_url)\r\n","sub_path":"contacts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"181802929","text":"\nprint (\"Import Required libraries\")\n#####\nimport tensorflowjs as tfjs\nfrom numpy.random import seed\nseed(101)\nfrom tensorflow import set_random_seed\nset_random_seed(101)\nimport pandas as pd\nimport numpy as np\nimport tensorflow\nfrom tensorflow.keras.layers import Dense, Dropout\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.metrics import categorical_crossentropy\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint\n\nimport os\n\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import train_test_split\nimport itertools\nimport shutil\nimport matplotlib.pyplot as plt\n# %matplotlib inline\n\n\n\nprint (\"Loading data\")\n\npath = r'C:\\reet_personal\\hackathon\\input'\nos.chdir(path)\n\nprint (\"Directories\")\n######################\n# base_dir = r'C:\\reet_personal\\hackathon\\input\\base_dir'\n# train_dir = os.path.join(base_dir, 'train_dir')\n# val_dir = os.path.join(base_dir, 'val_dir')\n\n# nv = os.path.join(train_dir, 'nv')\n# mel = os.path.join(train_dir, 'mel')\n# bkl = os.path.join(train_dir, 'bkl')\n# bcc = os.path.join(train_dir, 'bcc')\n# akiec = os.path.join(train_dir, 'akiec')\n# vasc = os.path.join(train_dir, 'vasc')\n# df = os.path.join(train_dir, 'df')\n\n# nv = os.path.join(val_dir, 'nv')\n# mel = os.path.join(val_dir, 'mel')\n# bkl = os.path.join(val_dir, 'bkl')\n# bcc = os.path.join(val_dir, 'bcc')\n# akiec = os.path.join(val_dir, 'akiec')\n# vasc = os.path.join(val_dir, 'vasc')\n# df = os.path.join(val_dir, 'df')\n######################\n\nprint (\"Reading metadata\")\n\ndf_data = pd.read_csv(r'C:\\reet_personal\\hackathon\\input\\HAM10000_metadata.csv')\ndf = df_data.groupby('lesion_id').count()\ndf = df[df['image_id'] == 1]\n\ndf.reset_index(inplace=True)\n\nprint (\"Identifying duplicates\")\n\ndef dup(x):\n \n unique_list = list(df['lesion_id'])\n \n if x in unique_list:\n return 'no_duplicates'\n else:\n return 'has_duplicates'\n \ndf_data['duplicates'] = df_data['lesion_id']\n\n# apply the function to this new column\ndf_data['duplicates'] = df_data['duplicates'].apply(dup)\ndf = df_data[df_data['duplicates'] == 'no_duplicates']\n\ny = df['dx']\n_, df_val = train_test_split(df, test_size=0.17, random_state=101, stratify=y)\n\n\nprint (\"validation rows\")\n\ndef id_val_rows (x):\n # create a list of all the lesion_id's in the val set\n val_list = list(df_val['image_id'])\n \n if str(x) in val_list:\n return 'val'\n else:\n return 'train'\n\n\ndf_data['train_or_val'] = df_data['image_id']\ndf_data['train_or_val'] = df_data['train_or_val'].apply(id_val_rows )\n \ndf_train = df_data[df_data['train_or_val'] == 'train']\n\ndf_train = df_train[:10]\ndf_val = df_val[:10]\n\nprint(len(df_train))\nprint(len(df_val))\n\nprint (\"Train and validation index created\")\n\nprint (\"Score_code_part_1_end, start part 2\")\n\n\n\nprint (\"Score code part 2 start\")\n\n\n# Set the 
image_id as the index in df_data\ndf_data.set_index('image_id', inplace=True)\n\n# Change the folder name\n###################################################################################################################################\n\n\n# # Change path\n# folder_1 = os.listdir(r'C:\\reet_personal\\hackathon\\input\\ham10000_images_part_1')\n# folder_2 = os.listdir(r'C:\\reet_personal\\hackathon\\input\\ham10000_images_part_2')\n\n# train_list = list(df_train['image_id'])\n# val_list = list(df_val['image_id'])\n\n# print (\"Transfer the train images\")\n\n# # Transfer the train images\n\n# for image in train_list:\n \n # fname = image + '.jpg'\n # label = df_data.loc[image,'dx']\n \n # if fname in folder_1:\n\t\t# # Change path\n # src = os.path.join(r'C:\\reet_personal\\hackathon\\input\\ham10000_images_part_1', fname)\n # # destination path to image\n # dst = os.path.join(train_dir, label, fname)\n # # copy the image from the source to the destination\n # shutil.copyfile(src, dst)\n\n # if fname in folder_2:\n\t\t# # Change path\n # src = os.path.join(r'C:\\reet_personal\\hackathon\\input\\ham10000_images_part_2', fname)\n # # destination path to image\n # dst = os.path.join(train_dir, label, fname)\n # # copy the image from the source to the destination\n # shutil.copyfile(src, dst)\n\n\n# print (\"Transfer the val images\")\n\n# for image in val_list:\n \n # fname = image + '.jpg'\n # label = df_data.loc[image,'dx']\n \n # if fname in folder_1:\n # # Change path\n # src = os.path.join(r'C:\\reet_personal\\hackathon\\input\\ham10000_images_part_1', fname)\n # # destination path to image\n # dst = os.path.join(val_dir, label, fname)\n # # copy the image from the source to the destination\n # shutil.copyfile(src, dst)\n\n # if fname in folder_2:\n # # Change path\n # src = os.path.join(r'C:\\reet_personal\\hackathon\\input\\ham10000_images_part_2', fname)\n # # destination path to image\n # dst = os.path.join(val_dir, label, fname)\n # # copy the image from the source to the destination\n # shutil.copyfile(src, dst)\n\t\t\n\n\t\t\n# # note that we are not augmenting class 'nv'\n# class_list = ['mel','bkl','bcc','akiec','vasc','df']\n\n\n# for item in class_list:\n \n # # create a base dir\n # aug_dir = 'aug_dir'\n # os.mkdir(aug_dir)\n # # create a dir within the base dir to store images of the same class\n # img_dir = os.path.join(aug_dir, 'img_dir')\n # os.mkdir(img_dir)\n\n # # Choose a class\n # img_class = item\n\n # # list all images in that directory\n # img_list = os.listdir('base_dir/train_dir/' + img_class)\n\n # # Copy images from the class train dir to the img_dir e.g. 
class 'mel'\n # for fname in img_list:\n # # source path to image\n # src = os.path.join('base_dir/train_dir/' + img_class, fname)\n # # destination path to image\n # dst = os.path.join(img_dir, fname)\n # # copy the image from the source to the destination\n # shutil.copyfile(src, dst)\n\n\n # # point to a dir containing the images and not to the images themselves\n # path = aug_dir\n # save_path = 'base_dir/train_dir/' + img_class\n\n # # Create a data generator\n # datagen = ImageDataGenerator(\n # rotation_range=180,\n # width_shift_range=0.1,\n # height_shift_range=0.1,\n # zoom_range=0.1,\n # horizontal_flip=True,\n # vertical_flip=True,\n # #brightness_range=(0.9,1.1),\n # fill_mode='nearest')\n\n # batch_size = 50\n\n # aug_datagen = datagen.flow_from_directory(path,\n # save_to_dir=save_path,\n # save_format='jpg',\n # target_size=(224,224),\n # batch_size=batch_size)\n\n\n\n # # Generate the augmented images and add them to the training folders\n \n # ###########\n \n # num_aug_images_wanted = 6000 # total number of images we want to have in each class\n \n # ###########\n \n # num_files = len(os.listdir(img_dir))\n # num_batches = int(np.ceil((num_aug_images_wanted-num_files)/batch_size))\n\n # # run the generator and create about 6000 augmented images\n # for i in range(0,num_batches):\n\n # imgs, labels = next(aug_datagen)\n \n # # delete temporary directory with the raw image files\n # shutil.rmtree('aug_dir')\n\t\n###################################################################################################################################################\n\ntrain_path = 'base_dir/train_dir'\nvalid_path = 'base_dir/val_dir'\n\nnum_train_samples = len(df_train)\nnum_val_samples = len(df_val)\ntrain_batch_size = 10\nval_batch_size = 10\nimage_size = 224\n\ntrain_steps = np.ceil(num_train_samples / train_batch_size)\nval_steps = np.ceil(num_val_samples / val_batch_size)\n\n\n\nprint (\"Image generator starts\")\n\ndatagen = ImageDataGenerator(\n preprocessing_function= \\\n tensorflow.keras.applications.mobilenet.preprocess_input)\n\ntrain_batches = datagen.flow_from_directory(train_path,\n target_size=(image_size,image_size),\n batch_size=train_batch_size)\n\nvalid_batches = datagen.flow_from_directory(valid_path,\n target_size=(image_size,image_size),\n batch_size=val_batch_size)\n\n# Note: shuffle=False causes the test dataset to not be shuffled\ntest_batches = datagen.flow_from_directory(valid_path,\n target_size=(image_size,image_size),\n batch_size=1,\n shuffle=False)\n\t\t\t\t\t\t\t\t\t\t\t\n\n\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\nprint (\"Model mobilenet import\")\n\t\t\t\t\t\t\t\t\t\t\t\nmobile = tensorflow.keras.applications.mobilenet.MobileNet()\n\n# CREATE THE MODEL ARCHITECTURE\n\n# Exclude the last 5 layers of the above model.\n# This will include all layers up to and including global_average_pooling2d_1\nx = mobile.layers[-6].output\n\n# Create a new dense layer for predictions\n# 7 corresponds to the number of classes\nx = Dropout(0.25)(x)\npredictions = Dense(7, activation='softmax')(x)\n\n\n# inputs=mobile.input selects the input layer, outputs=predictions refers to the\n# dense layer we created above.\n\nmodel = Model(inputs=mobile.input, outputs=predictions)\n\n# We need to choose how many layers we actually want to be trained.\n\n# Here we are freezing the weights of all layers except the\n# last 23 layers in the new model.\n# The last 23 layers of the model will be trained.\n\nfor layer in model.layers[:-23]:\n 
layer.trainable = False\n\n\n# Define Top2 and Top3 Accuracy\n\nfrom tensorflow.keras.metrics import categorical_accuracy, top_k_categorical_accuracy\n\ndef top_3_accuracy(y_true, y_pred):\n return top_k_categorical_accuracy(y_true, y_pred, k=3)\n\ndef top_2_accuracy(y_true, y_pred):\n return top_k_categorical_accuracy(y_true, y_pred, k=2)\n\n\n\n\nmodel.compile(Adam(lr=0.01), loss='categorical_crossentropy', \n metrics=[categorical_accuracy, top_2_accuracy, top_3_accuracy])\n\n\n\n# Get the labels that are associated with each index\nprint(valid_batches.class_indices)\n\n\nfilepath = \"model.h5\"\ncheckpoint = ModelCheckpoint(filepath, monitor='val_top_3_accuracy', verbose=1, \n save_best_only=True, mode='max')\n\nreduce_lr = ReduceLROnPlateau(monitor='val_top_3_accuracy', factor=0.5, patience=2, \n verbose=1, mode='max', min_lr=0.00001)\n \n \ncallbacks_list = [checkpoint, reduce_lr]\n\nclass_weights={\n 0: 1.0, # akiec\n 1: 1.0, # bcc\n 2: 1.0, # bkl\n 3: 1.0, # df\n 4: 3.0, # mel # Try to make the model more sensitive to Melanoma.\n 5: 1.0, # nv\n 6: 1.0, # vasc\n}\n\n\nhistory = model.fit_generator(train_batches, steps_per_epoch=train_steps, \n class_weight=class_weights,\n validation_data=valid_batches,\n validation_steps=val_steps,\n epochs=1, verbose=1,\n callbacks=callbacks_list)\n\n# Here the last epoch will be used.\n\n\ntfjs.converters.save_keras_model(model, r\"C:\\reet_personal\\hackathon\\model_output\") \nprint (\"Model output stored\")\n\nprint (\"Validation Results\")\n\n\nval_loss, val_cat_acc, val_top_2_acc, val_top_3_acc = \\\nmodel.evaluate_generator(test_batches, \n steps=len(df_val))\n\nprint('val_loss:', val_loss)\nprint('val_cat_acc:', val_cat_acc)\nprint('val_top_2_acc:', val_top_2_acc)\nprint('val_top_3_acc:', val_top_3_acc)\n\n\n\t\t\t\t\t\t\t\t\t\t\t","sub_path":"score_code_wip_v1.py","file_name":"score_code_wip_v1.py","file_ext":"py","file_size_in_byte":11781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"515238338","text":"class myStack:\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.top = -1\n self.stackArr = []\n\n def isFull(self):\n if self.top >= self.capacity - 1:\n return True\n else:\n return False\n\n def isEmpty(self):\n if self.top < 0:\n return True\n else:\n return False\n\n def printStack(self):\n if self.isEmpty():\n return False\n\n size = self.capacity - 1\n while(size >= 0):\n isTop = \"<- Top\" if size == self.top else \"\"\n if size <= self.top:\n valprint = \"{}{}\".format(self.stackArr[size], \" \") if self.stackArr[size] < 10 else self.stackArr[size]\n else:\n valprint = \" \"\n\n print(\"| {0} | {1}\".format(valprint, isTop))\n size -= 1\n\n def push(self, val):\n if self.isFull():\n return\n\n self.top += 1\n self.stackArr.insert(self.top, val)\n\n def pop(self):\n if self.isEmpty():\n return False\n\n val = self.stackArr[self.top]\n self.stackArr[self.top] = 0\n self.top -= 1\n return val\n\n def peak(self):\n if self.isEmpty():\n return False\n\n return self.stackArr[self.top]\n","sub_path":"CodeingQuestions/CodeingQuestions/myStack.py","file_name":"myStack.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"143557172","text":"import os\nimport argparse\n\nimport pandas as pd\n\n\ndef cifar10_create_file_list(cifar10_path: str) -> pd.DataFrame:\n # Iterate over data batches\n data = pd.DataFrame()\n for i, fpath in enumerate(os.scandir(cifar10_path)):\n 
data_dict = {'filepath': list(), 'label': list(), 'splitname': list()}\n print(f'At batch: {fpath.name}')\n # Mark train and test batches\n if 'test' in fpath.name:\n data_split_name = 'test'\n else:\n data_split_name = 'train'\n # Iterate over labels\n for lpath in os.scandir(fpath.path):\n # Iterate over images\n for ipath in os.scandir(lpath.path):\n data_dict['filepath'].append(ipath.path)\n data_dict['label'].append(lpath.name)\n data_dict['splitname'].append(data_split_name)\n\n data = data.append(pd.DataFrame(data=data_dict), ignore_index=True)\n return data\n\n\ndef main(path):\n cifar10_path = path\n out_fname = 'cifar10DF.csv'\n df = cifar10_create_file_list(cifar10_path)\n df.to_csv(os.path.join(cifar10_path, out_fname), sep=';', index=False)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--data_path',\n default=None,\n type=str,\n help='Data path for cifar10.')\n args = parser.parse_args()\n if args.data_path:\n main(path=args.data_path)\n else:\n print('Please, set the data path')\n","sub_path":"data/cifar10_create_file_list.py","file_name":"cifar10_create_file_list.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"72894264","text":"from collections import deque\n\n\nclass Solution(object):\n def findOrder(self, numCourses, prerequisites):\n \"\"\"\n :type numCourses: int\n :type prerequisites: List[List[int]]\n :rtype: List[int]\n \"\"\"\n sortedOrder = []\n if numCourses <= 0:\n return sortedOrder\n\n # a. Initialize the graph\n inDegree = {i: 0 for i in range(numCourses)} # count of incoming edges\n graph = {i: [] for i in range(numCourses)} # adjacency list graph\n\n # b. Build the graph\n for prerequisite in prerequisites:\n parent, child = prerequisite[1], prerequisite[\n 0] # according to the problem statement, the 1st element will be the child and the second one will be the parent\n graph[parent].append(child) # put the child into its parent's list\n inDegree[child] += 1 # increment child's inDegree\n\n # c. Find all sources i.e., all vertices with 0 in-degrees\n sources = deque()\n for key in inDegree:\n if inDegree[key] == 0:\n sources.append(key)\n\n # d. 
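# A quick usage sketch for the solver being built here (hypothetical course
# list): course 0 unlocks courses 1 and 2, and both 1 and 2 unlock course 3.
#   >>> Solution().findOrder(4, [[1, 0], [2, 0], [3, 1], [3, 2]])
#   [0, 1, 2, 3]
# ([0, 2, 1, 3] would be equally valid; an empty list signals a cyclic dependency.)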
For each source, add it to the sortedOrder and subtract one from all of its children's in-degrees\n # if a child's in-degree becomes zero, add it to the sources queue\n while sources:\n vertex = sources.popleft()\n sortedOrder.append(vertex)\n for child in graph[vertex]: # get the node's children to decrement their in-degrees\n inDegree[child] -= 1\n if inDegree[child] == 0:\n sources.append(child)\n\n # if sortedOrder doesn't contain all tasks, there is a cyclic dependency between tasks, therefore, we\n # will not be able to schedule all tasks\n if len(sortedOrder) != numCourses:\n return []\n\n return sortedOrder\n\n\n\n","sub_path":"leetcode.com/python/210_Course_Schedule_II.py","file_name":"210_Course_Schedule_II.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"393163851","text":"from helium import *\nfrom time import sleep\nimport json\nimport requests\nfrom win10toast import ToastNotifier\nimport datetime\n\n# -- data -- #\ncredentials = json.loads(''.join(open(\"credentials.json\").readlines()))\nINSTACART_EMAIL = credentials[\"INSTACART_EMAIL\"]\nINSTACART_PASSWORD = credentials[\"INSTACART_PASSWORD\"]\nSTORE_LIST = credentials[\"STORE_LIST\"]\nINSTACART_BASE_URL = credentials[\"INSTACART_BASE_URL\"]\nINSTACART_DELIVERY_URL = credentials[\"INSTACART_DELIVERY_URL\"]\nNOTIFICATION_EMAIL = credentials[\"NOTIFICATION_EMAIL\"]\n\nwait_time = 60 # in seconds\n\n# -- login logic -- #\nstart_chrome(INSTACART_BASE_URL)#, headless=True)\nclick(Link(\"Log In\"))\nwrite(INSTACART_EMAIL, into=\"Email address\")\nwrite(INSTACART_PASSWORD, into=\"Password\")\nclick(Button(\"Log In\"))\nwait_until(Link(\"See delivery times\").exists)\n\n\n# -- check store logic -- #\ndef check_delivery_times_for_store(store_name):\n go_to(INSTACART_DELIVERY_URL.format(store_name))\n sleep(7)\n if Text(\"No delivery times available\").exists():\n return False, \"No Delivery times available. Try again later?\"\n else:\n return (\n True,\n \"Delivery times found for {}! 
Please check soon :)\".format(store_name),\n )\n\n\n# -- send email -- #\ndef send_simple_message(message):\n print(\"[%s] Available!\" % datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n t = ToastNotifier()\n t.show_toast(\"Instacart\", \"Available!\", duration=10, threaded=True)\n return\n\n\n\n# -- check all stores in list and notify -- #\ndef main():\n for store in STORE_LIST:\n availability, message = check_delivery_times_for_store(store)\n if availability:\n send_simple_message(message)\n else:\n print(\"[%s] Checked\" % datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n\n\nif __name__ == \"__main__\":\n t = ToastNotifier()\n t.show_toast(\"Instacart\", \"Initialized\", duration=10, threaded=True)\n while True:\n main()\n sleep(wait_time - 7)\n","sub_path":"check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"417015789","text":"import json\r\nfrom RiotAPI import RiotAPI\r\nimport RiotConsts as Consts\r\nimport sys, getopt\r\nimport time\r\nfrom datetime import datetime, timezone\r\nfrom dateutil import tz\r\nimport asyncio\r\n\r\nimport discord\r\nfrom discord.ext import commands\r\n\r\n#Private file for individual use\r\nimport Private\r\n\r\napi = RiotAPI(Private.apiKey)\r\n\r\nglobal champions\r\nchampions = None\r\nglobal channel\r\nchannel = None\r\nglobal root\r\nroot = None\r\nglobal inputFile\r\ninputFile = ''\r\nglobal outputFile\r\noutputFile = ''\r\nglobal defaultJsonFile\r\ndefaultJsonFile = \"Testing2.json\"\r\n\r\nurl = {\r\n 'base': 'https://euw1.api.riotgames.com/{url}',\r\n 'league_by_summonerID': 'lol/league/{version}/positions/by-summoner/{encryptedSummonerId}',\r\n 'matches_by_accountID': '/lol/match/v4/matchlists/by-account/{encryptedAccountId}',\r\n 'match_by_matchID': '/lol/match/v4/matches/{matchId}',\r\n 'accountID_by_summonerName': '/lol/summoner/v4/summoners/by-name/{summonerName}'\r\n}\r\n\r\ndef ReadInputParams(argv):\r\n global inputFile\r\n global outputFile\r\n global defaultJsonFile\r\n try:\r\n opts, args = getopt.getopt(argv,\"hi:o:\",[\"ifile=\",\"ofile=\"])\r\n except getopt.GetoptError:\r\n print('test.py -i <inputfile> -o <outputfile>')\r\n sys.exit(2)\r\n for opt, arg in opts:\r\n if opt == '-h':\r\n print('test.py -i <inputfile> -o <outputfile>')\r\n sys.exit()\r\n elif opt in (\"-i\", \"--ifile\"):\r\n inputFile = arg\r\n elif opt in (\"-o\", \"--ofile\"):\r\n outputFile = arg\r\n if not outputFile and not inputFile:\r\n outputFile = defaultJsonFile\r\n inputFile = defaultJsonFile\r\n if not inputFile and outputFile:\r\n inputFile = outputFile\r\n if not outputFile and inputFile:\r\n outputFile = inputFile\r\n\r\n print('Input file is:', inputFile)\r\n print('Output file is:', outputFile)\r\n\r\ndef WriteJsonObjectToFile(fileName, root):\r\n with open(fileName, \"w\", encoding='utf-8-sig') as write_file:\r\n json.dump(root, write_file, indent=5, ensure_ascii=False)\r\n\r\ndef ReadJsonObjectFromFile(fileName):\r\n with open(fileName, \"r\", encoding='utf-8-sig') as read_file:\r\n return(json.load(read_file))\r\n\r\ndef EvaluateAccountInformationForAccountFromApi(account):\r\n result = api.get_accountID_by_summonerID(account[\"summonerName\"])\r\n account[\"summonerId\"] = result[\"id\"]\r\n account[\"accountId\"] = result[\"accountId\"]\r\n WriteJsonObjectToFile(outputFile, root)\r\n\r\ndef EvaluateKdaFromLastMatch(participantToAccountId):\r\n K = participantToAccountId[\"stats\"][\"kills\"]\r\n D = participantToAccountId[\"stats\"][\"deaths\"]\r\n A 
= participantToAccountId[\"stats\"][\"assists\"]\r\n #return(round((K + A)/max(D,1), 2))\r\n return(\"{0}/{1}/{2}\".format(K, D, A))\r\n\r\ndef EvaluateMatchResultFromLastMatch(participantToAccountId):\r\n return(participantToAccountId[\"stats\"][\"win\"])\r\n\r\ndef EvaluateChampionFromLastMatch(participantToAccountId):\r\n for champion in champions[\"champions\"]:\r\n if champion[\"championId\"] == participantToAccountId[\"championId\"]:\r\n return(champion[\"championName\"])\r\n\r\ndef UnixTimeStampToLocalTimezoneTimeStampConverter(unixTimeStamp):\r\n return(time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime(unixTimeStamp)))\r\n\r\nasync def EvaluateMostRecentMatchResult(matchId, account):\r\n accountId = account[\"accountId\"]\r\n result = api.get_match_by_matchID(matchId)\r\n #WIP\r\n for participantIdentity in result[\"participantIdentities\"]:\r\n if participantIdentity[\"player\"][\"accountId\"] == accountId:\r\n participantId = participantIdentity[\"participantId\"]\r\n for participant in result[\"participants\"]:\r\n if participant[\"participantId\"] == participantId:\r\n account[\"mostRecentKda\"] = EvaluateKdaFromLastMatch(participant)\r\n account[\"mostRecentMatchWon\"] = EvaluateMatchResultFromLastMatch(participant)\r\n account[\"mostRecentChampion\"] = EvaluateChampionFromLastMatch(participant)\r\n account[\"mostRecentMatchTimeStamp\"] = UnixTimeStampToLocalTimezoneTimeStampConverter(result[\"gameCreation\"] / 1000 + result[\"gameDuration\"])\r\n await PostLatestMatchInfoToDiscord(account)\r\n WriteJsonObjectToFile(outputFile, root)\r\n\r\n#In Progress\r\nasync def EvaluateMatchListForAccountFromApi(account):\r\n result = api.get_matches_by_accountID(account[\"accountId\"])\r\n lastMatchId = result[\"matches\"][0][\"gameId\"]\r\n #WIP\r\n if (account[\"lastMatchId\"] == None) or (account[\"lastMatchId\"] != lastMatchId):\r\n account[\"lastMatchId\"] = lastMatchId\r\n #Append regional timestamp of last match to json\r\n await EvaluateMostRecentMatchResult(lastMatchId, account)\r\n\r\nasync def EvaluateAccountInformationForUsers(users):\r\n for user in users:\r\n for account in user[\"accounts\"]:\r\n if (account[\"summonerId\"] == None or account[\"accountId\"] == None):\r\n EvaluateAccountInformationForAccountFromApi(account)\r\n await EvaluateMatchListForAccountFromApi(account)\r\n\r\nasync def PostLatestMatchInfoToDiscord(account):\r\n await channel.send(\"```fix\\n{0} just played a match.\\n\\nDate: {1}\\nWin: {2}\\nChampion: {3}\\nKDA: {4}\\n```\".format(account[\"summonerName\"], account[\"mostRecentMatchTimeStamp\"], account[\"mostRecentMatchWon\"], account[\"mostRecentChampion\"], account[\"mostRecentKda\"]))\r\n\r\nbot = commands.Bot(command_prefix = '.')\r\n\r\n@bot.event\r\nasync def on_ready():\r\n global channel\r\n channel = bot.get_channel(Private.channelId)\r\n global champions\r\n global root\r\n global inputFile\r\n global outputFile\r\n #Global object in Testing.py with name \"root\"\r\n ReadInputParams(sys.argv[1:])\r\n root = ReadJsonObjectFromFile(inputFile)\r\n champions = ReadJsonObjectFromFile(\"Champions.json\")\r\n while True:\r\n await EvaluateAccountInformationForUsers(root[\"users\"])\r\n WriteJsonObjectToFile(outputFile, root)\r\n\r\n #Request every 2 mins\r\n print(\"2 mins timer active.\")\r\n time.sleep(120)\r\n\r\nbot.run(Private.discordToken)\r\n\r\n# while True:\r\n# loop = asyncio.new_event_loop()\r\n# asyncio.set_event_loop(loop)\r\n\r\n# ReadInputParams(sys.argv[1:])\r\n# #Global object in Testing.py with name \"root\"\r\n# root = 
ReadJsonObjectFromFile(inputFile)\r\n# champions = ReadJsonObjectFromFile(\"Champions.json\")\r\n# loop.run_until_complete(EvaluateAccountInformationForUsers(root[\"users\"]))\r\n# WriteJsonObjectToFile(outputFile, root)\r\n# print(\"120 seconds timer\")\r\n# time.sleep(120)\r\n","sub_path":"Testing.py","file_name":"Testing.py","file_ext":"py","file_size_in_byte":6569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"39629906","text":"# coding: utf-8\nimport unittest\n\n__author__ = 'jensen lau'\n# Given a singly linked list L: L0 -> L1 -> ... -> Ln-1 -> Ln, reorder it to: L0 -> Ln -> L1 -> Ln-1 -> L2 -> Ln-2 -> ...\n# You must do this in-place without altering the nodes’ values.\n# For example, Given {1,2,3,4}, reorder it to {1,4,2,3}.\n\n\n\nclass MyTest(unittest.TestCase):\n def test(self):\n self.assertEqual(1, 1)\n\n\nif __name__ == '__main__':\n unittest.main()\n\n\n\n\n\n\n","sub_path":"ReorderList.py","file_name":"ReorderList.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"51622858","text":"# complete the code of the function\ndef amigos(a,b):\n sumaa=0\n sumab=0\n for i in range(1,a):\n if a%i==0:\n sumaa+=i\n for i in range(1,b):\n if b%i==0:\n sumab+=i\n if sumaa==b and sumab==a:\n return True\n else:\n return False\n ","sub_path":"tema2_ej2/tema2_ej2_15638375.py","file_name":"tema2_ej2_15638375.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"523812496","text":"\"\"\"\nYou are given a map in form of a two-dimensional integer grid where 1 represents land \nand 0 represents water. Grid cells are connected horizontally/vertically (not diagonally). \nThe grid is completely surrounded by water, and there is exactly one island (i.e., one or \nmore connected land cells). The island doesn't have \"lakes\" (water inside that isn't \nconnected to the water around the island). One cell is a square with side length 1. \nThe grid is rectangular, width and height don't exceed 100. 
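# A small usage sketch for amigos() above: 220 and 284 are the classic amicable
# pair (the proper divisors of 220 sum to 284 and vice versa).
#   >>> amigos(220, 284)
#   True
#   >>> amigos(220, 285)
#   False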
Determine the perimeter \nof the island.\n\"\"\"\nclass Solution:\n def islandPerimeter(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n def checkSurroundings(grid, x, y):\n edges = 0 \n grid[y][x] = -1\n \n if y-1 >= 0:\n if grid[y-1][x] == 1:\n grid, temp = checkSurroundings(grid, x, y-1)\n edges += temp\n elif grid[y-1][x] == 0:\n edges += 1\n else:\n edges += 1\n\n if y+1 < len(grid):\n if grid[y+1][x] == 1:\n grid, temp = checkSurroundings(grid, x, y+1)\n edges += temp\n elif grid[y+1][x] == 0:\n edges += 1\n else:\n edges += 1\n\n if x-1 >= 0:\n if grid[y][x-1] == 1:\n grid, temp = checkSurroundings(grid, x-1, y)\n edges += temp\n elif grid[y][x-1] == 0:\n edges += 1\n else:\n edges += 1\n\n if x+1 < len(grid[0]):\n if grid[y][x+1] == 1:\n grid, temp = checkSurroundings(grid, x+1, y)\n edges += temp\n elif grid[y][x+1] == 0:\n edges += 1\n else:\n edges += 1\n\n return grid, edges\n \n for y in range(len(grid)):\n for x in range(len(grid[0])):\n if grid[y][x] == 1:\n return checkSurroundings(grid, x, y)[1]\n","sub_path":"Python3/IslandPerimiter.py","file_name":"IslandPerimiter.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"451476874","text":"# This module is intended to determine the probability a triangle is obtuse\n# Given that its vertices are determined uniformly in the unit square\n\nfrom numpy.random import rand\nfrom numpy import mean, std\nfrom scipy.stats import sem, t\nimport matplotlib.pyplot as plt\nfrom math import sqrt, acos, pi\nfrom itertools import combinations\nfrom sys import argv\n\ndef CreateTriangle():\n\t\"\"\"Creates a random triangle in the unit square\n\tReturns a list with three points of the form (x, y)\n\tThese coordinates are generated uniformly in the unit square\"\"\"\n\treturn [(x, y) for x, y in zip(rand(3), rand(3))]\n\ndef DistPoints(p1, p2):\n\t\"\"\"Calculate the distance between two points of the form (x, y)\"\"\"\n\treturn sqrt(pow(p2[0] - p1[0], 2) + pow(p2[1] - p1[1], 2))\n\ndef TriangleAngles(alist):\n\t\"\"\"Determine the angles of a triangle given a list of its vertices\n\tThe list must contain three tuples of the form (x, y)\n\tThis function does not check to ensure this requirement is met\"\"\"\n\ta, b, c = (DistPoints(tup[0], tup[1]) for tup in combinations(alist, 2))\n\tangles = [acos((a**2 + b**2 - c**2) / (2*a*b)),\n\t\t\t acos((a**2 + c**2 - b**2) / (2*a*c)),\n\t\t\t acos((b**2 + c**2 - a**2) / (2*b*c))]\n\treturn angles\n\ndef isObtuse(alist):\n\t\"\"\"Determine whether a triangle is an obtuse triangle given a list of its vertices\n\tThe list must contain three tuples of the form (x, y)\n\tThis function does not check to ensure this requirement is met\"\"\"\n\tfor angle in TriangleAngles(alist):\n\t\tif angle > pi/2:\n\t\t\treturn True\n\treturn False\n\ndef isRight(alist):\n\t\"\"\"Determine whether a triangle is a right triangle given a list of its vertices\n\tThe list must contain three tuples of the form (x, y)\n\tThis function does not check to ensure this requirement is met\n\tThis function is supplementary and is not used in this module\"\"\"\n\treturn any(not (x - pi/2) for x in TriangleAngles(alist))\n\ndef isAcute(alist):\n\t\"\"\"Determine whether a triangle is an acute triangle given a list of its vertices\n\tThe list must contain three tuples of the form (x, y)\n\tThis function does not check to ensure this requirement is met\n\tThis function is supplementary and is not used in this 
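# A usage sketch for the solution above with the common example grid; the single
# island has perimeter 16. Note that islandPerimeter mutates the grid (visited
# land cells are marked -1), so pass a copy if the grid is reused afterwards.
#   >>> Solution().islandPerimeter([[0, 1, 0, 0],
#   ...                             [1, 1, 1, 0],
#   ...                             [0, 1, 0, 0],
#   ...                             [1, 1, 0, 0]])
#   16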
module\"\"\"\n\tfor angle in TriangleAngles(alist):\n\t\tif angle == pi/2 or angle > pi/2:\n\t\t\treturn False\n\treturn True\n\ndef CI_mean(alist, alpha=.95, round_num=False):\n\t\"\"\"Generate a confidence interval about the mean of a list of numbers with confidence alpha\n\tSpecify a round_num (int) to round the values of your confidence interval to round_num decimal places\"\"\"\n\tmu, s = mean(alist), sem(alist)\n\tn = len(alist)\n\tHL = s * t.ppf((1+alpha)/2, n-1)\n\treturn tuple(round(x, 2) for x in (mu - HL, mu + HL)) if type(round_num) is int else (mu - HL, mu + HL)\n\nif __name__ == '__main__':\n\titerations = int(argv[2]) if len(argv) >= 3 else 1\n\ttrials_per_iteration = int(argv[1]) if len(argv) >= 2 else 50\n\talpha = float(argv[3]) if len(argv) >= 4 else .95\n\ttotalProbs = [mean([isObtuse(CreateTriangle()) for x in range(trials_per_iteration)]) for i in range(iterations)]\n\n\tmystr =\\\nf\"\"\"The mean probability of a triangle being obtuse is: {round(mean(totalProbs), 2)}\nThe standard deviation is: {round(std(totalProbs), 2)}\nA {100*alpha}% confidence interval for the mean of this probability is: {CI_mean(totalProbs, alpha, 2)}\"\"\"\n\tprint(mystr)\n\n\tplt.hist(totalProbs)\n\tplt.show()","sub_path":"ObtuseTriangleProbability.py","file_name":"ObtuseTriangleProbability.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"113489925","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='UserLink',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('target1', models.ForeignKey(related_name='target1', to=settings.AUTH_USER_MODEL)),\n ('target2', models.ForeignKey(related_name='target2', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='UserProfile',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('nick_name', models.CharField(max_length=30, verbose_name='名前')),\n ('channel_page', models.URLField(null=True, verbose_name='チャンネル', blank=True)),\n ('target_user', models.ForeignKey(unique=True, to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"cms/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"281769788","text":"try:\n\tfrom urllib import quote_plus #python 2\nexcept:\n\tpass\n\ntry:\n\tfrom urllib.parse import quote_plus #python 3\nexcept: \n\tpass\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.utils import timezone\nfrom django.shortcuts import render, get_object_or_404\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.http import HttpResponseRedirect, Http404\nfrom django.urls import reverse_lazy\nfrom django.views.generic import (\n\tFormView,\n\tListView,\n\tDetailView,\n\tCreateView,\n\tUpdateView,\n\tDeleteView,\n)\n\nfrom 
comments.models import Comment\nfrom comments.forms import CommentForm\nfrom .models import Post\nfrom .forms import PostForm\nfrom tags.models import Tag\n\nUser = get_user_model()\n\n# Create your views here.\nclass PostListView(ListView):\n\tmodel = Post\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(PostListView, self).get_context_data(**kwargs)\n\t\tpaginator = Paginator(context['post_list'], 10)\n\n\t\tpage = self.request.GET.get('page')\n\t\ttry:\n\t\t\tcontext['post_list'] = paginator.page(page)\n\t\texcept PageNotAnInteger:\n\t\t\t# If page is not an integer, deliver first page.\n\t\t\tcontext['post_list'] = paginator.page(1)\n\t\texcept EmptyPage:\n\t\t\t# If page is out of range (e.g. 9999), deliver last page of results.\n\t\t\tcontext['post_list'] = paginator.page(paginator.num_pages)\n\t\t\n\t\tuser_list = User.objects.all()\n\t\tcontext['user_list'] = user_list\n\t\tcontext['paginator'] = paginator\n\n\t\ttag_list = Tag.objects.all()\n\t\tif not self.request.user.is_superuser:\n\t\t\ttag_list = Tag.objects.all().exclude(tag='hango')\t\t\n\t\t\n\t\ttag_list = sorted(tag_list, key=lambda x: x.post_set.count(), reverse=True)\n\t\tcontext['tag_list'] = tag_list[:10]\n\n\t\treturn context\n\n\tdef get_queryset(self):\n\t\tinstances = super().get_queryset()\n\t\tif not self.request.user.is_superuser:\t\n\t\t\tinstances = instances.exclude(tags__tag='hango')\n\t\treturn instances\n\nclass PostCreateView(FormView):\n\n\ttemplate_name = 'posts/post_form.html'\n\tmodel = Post\n\tform_class = PostForm\n\tsuccess_url = reverse_lazy(\"posts:list\")\n\t# fields = ['title', 'content', 'publish', 'language']\n\n\tdef dispatch(self, request, *args, **kwargs):\n\t\tif not self.request.user.is_authenticated:\n\t\t\traise Http404\n\t\treturn super(PostCreateView, self).dispatch(request, *args, **kwargs)\n\n\tdef form_valid(self, form):\n\t\tpost = form.save()\n\t\tform_valid = super(PostCreateView, self).form_valid(form)\n\t\tif form.cleaned_data[\"tag\"]:\n\t\t\tnew_tag, created = Tag.objects.get_or_create(tag=form.cleaned_data[\"tag\"])\n\t\telse:\n\t\t\tnew_tag, created = Tag.objects.get_or_create(tag=\"blog\")\n\t\tpost.tags.add(new_tag)\n\t\treturn form_valid\n\nclass PostDetailView(DetailView):\n\tmodel = Post\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(PostDetailView, self).get_context_data(**kwargs)\n\t\treturn context\n\ndef post_detail(request, slug=None):\n\tinstance = get_object_or_404(Post, slug=slug)\n\t# if instance.publish > timezone.now().date() or instance.draft:\n\t\t# if not request.user.is_staff or not request.user.is_superuser:\n\t\t\t# raise Http404\n\t# share_string = quote_plus(instance.content)\n\n\tinitial_data = {\n\t\t\t\"content_type\": instance.get_content_type,\n\t\t\t\"object_id\": instance.id\n\t}\n\tform = CommentForm(request.POST or None, initial=initial_data)\n\tif form.is_valid() and request.user.is_authenticated:\n\t\tc_type = form.cleaned_data.get(\"content_type\")\n\t\tcontent_type = ContentType.objects.get(model=c_type)\n\t\tobj_id = form.cleaned_data.get('object_id')\n\t\tcontent_data = form.cleaned_data.get(\"content\")\n\t\tparent_obj = None\n\t\ttry:\n\t\t\tparent_id = int(request.POST.get(\"parent_id\"))\n\t\texcept:\n\t\t\tparent_id = None\n\n\t\tif parent_id:\n\t\t\tparent_qs = Comment.objects.filter(id=parent_id)\n\t\t\tif parent_qs.exists() and parent_qs.count() == 1:\n\t\t\t\tparent_obj = parent_qs.first()\n\n\n\t\tnew_comment, created = Comment.objects.get_or_create(\n\t\t\t\t\t\t\tuser = 
request.user,\n\t\t\t\t\t\t\tcontent_type= content_type,\n\t\t\t\t\t\t\tobject_id = obj_id,\n\t\t\t\t\t\t\tcontent = content_data,\n\t\t\t\t\t\t\tparent = parent_obj,\n\t\t\t\t\t\t)\n\t\treturn HttpResponseRedirect(new_comment.content_object.get_absolute_url())\n\n\tcomments = instance.comments\n\tcontext = {\n\t\t\"title\": instance.title,\n\t\t\"object\": instance,\n\t\t# \"share_string\": share_string,\n\t\t\"comments\": comments,\n\t\t\"comment_form\":form,\n\t}\n\treturn render(request, \"posts/post_detail.html\", context)\n\nclass PostUpdateView(LoginRequiredMixin, UpdateView):\n\ttemplate_name = 'posts/post_form.html'\n\tmodel = Post\n\tform_class = PostForm\n\tsuccess_url = reverse_lazy(\"posts:list\")\n\n\tdef dispatch(self, request, *args, **kwargs):\n\t\tinstance = self.get_object()\n\t\tif not self.request.user.is_superuser:\n\t\t\tif instance.user != self.request.user:\n\t\t\t\traise Http404\n\t\treturn super(PostUpdateView, self).dispatch(request, *args, **kwargs)\n\n\tdef form_valid(self, form):\n\t\tpost = form.save()\n\t\tform_valid = super(PostUpdateView, self).form_valid(form)\n\t\tif form.cleaned_data[\"tag\"]:\n\t\t\tnew_tag, created = Tag.objects.get_or_create(tag=form.cleaned_data[\"tag\"])\n\t\telse:\n\t\t\tnew_tag, created = Tag.objects.get_or_create(tag=\"blog\")\n\t\tpost.tags.add(new_tag)\n\t\treturn form_valid\n\nclass PostDeleteView(DeleteView):\n\tmodel = Post\n\tsuccess_url = reverse_lazy(\"posts:list\")\n\n\tdef dispatch(self, request, *args, **kwargs):\n\t\tinstance = self.get_object()\n\t\tif instance.user != self.request.user:\n\t\t\traise Http404\n\t\treturn super(PostDeleteView, self).dispatch(request, *args, **kwargs)","sub_path":"src/posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"237144113","text":"# The difference between the sum of the squares of the first ten natural numbers\n# and the square of the sum is 3025 - 385 = 2640.\n# Find the difference between the sum of the squares of the first one hundred natural numbers\n# and the square of the sum.\n\n\ndef getSumOfSquares(testNum):\n sum = 0\n for i in range(1, testNum):\n sum += (i ** 2)\n return sum\n\n\ndef getSquareOfSums(testNum):\n sum = 0\n for i in range(1, testNum):\n sum += i\n return sum ** 2\n\n\ndef main():\n testNum = 101\n squareOfSums = getSquareOfSums(testNum)\n sumOfSquares = getSumOfSquares(testNum)\n\n print(str(squareOfSums) + ' - ' + str(sumOfSquares) + ' = ' + str(squareOfSums - sumOfSquares))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/python/Problem06.py","file_name":"Problem06.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"38814430","text":"from tkinter import *\r\nimport math\r\n\r\nENTER = '0'\r\nDOT = True\r\n\r\ndef clear():\r\n global ENTER\r\n global DOT\r\n ENTER = '0'\r\n DOT = True\r\n screen['text'] = ENTER\r\n\r\ndef equal():\r\n global ENTER\r\n global DOT\r\n if ENTER[-1] == '+' or ENTER[-1] == '-' or ENTER[-1] == '×' or ENTER[-1] == '÷' or ENTER[-1] == '.':\r\n pass\r\n else:\r\n result = ENTER.replace('×','*').replace('÷','/')\r\n try:\r\n result = eval(result)\r\n if math.modf(result)[0] == 0.0:\r\n result = int(result)\r\n except ZeroDivisionError:\r\n result = 'Error'\r\n ENTER = str(result)\r\n screen['text'] = ENTER\r\n DOT = '.' not in ENTER\r\n\r\ndef plus():\r\n global ENTER\r\n global DOT\r\n if ENTER[-1] == '+' or ENTER[-1] == '-' or ENTER[-1] == '×' or ENTER[-1] == 
'÷' or ENTER[-1] == '.':\r\n pass\r\n else:\r\n ENTER = ENTER + '+'\r\n screen['text'] = ENTER\r\n DOT = True\r\n\r\ndef subtract():\r\n global ENTER\r\n global DOT\r\n if ENTER[-1] == '+' or ENTER[-1] == '-' or ENTER[-1] == '×' or ENTER[-1] == '÷' or ENTER[-1] == '.':\r\n pass\r\n else:\r\n ENTER = ENTER + '-'\r\n screen['text'] = ENTER\r\n DOT = True\r\n\r\n\r\ndef multiply():\r\n global ENTER\r\n global DOT\r\n if ENTER[-1] == '+' or ENTER[-1] == '-' or ENTER[-1] == '×' or ENTER[-1] == '÷' or ENTER[-1] == '.':\r\n pass\r\n else:\r\n ENTER = ENTER + '×'\r\n screen['text'] = ENTER\r\n DOT = True\r\n\r\n\r\ndef divide():\r\n global ENTER\r\n global DOT\r\n if ENTER[-1] == '+' or ENTER[-1] == '-' or ENTER[-1] == '×' or ENTER[-1] == '÷' or ENTER[-1] == '.':\r\n pass\r\n else:\r\n ENTER = ENTER + '÷'\r\n screen['text'] = ENTER\r\n DOT = True\r\n\r\n\r\ndef add_dot():\r\n global ENTER\r\n global DOT\r\n if DOT:\r\n if ENTER[-1] == '+' or ENTER[-1] == '-' or ENTER[-1] == '×' or ENTER[-1] == '÷' or ENTER[-1] == '.':\r\n pass\r\n else:\r\n ENTER = ENTER + '.'\r\n screen['text'] = ENTER\r\n DOT = False\r\n\r\ndef add_zero():\r\n global ENTER\r\n if ENTER == '0':\r\n pass\r\n else:\r\n ENTER = ENTER + '0'\r\n screen['text'] = ENTER\r\n\r\ndef add_one():\r\n global ENTER\r\n if ENTER == '0':\r\n ENTER = '1'\r\n else:\r\n ENTER = ENTER + '1'\r\n screen['text'] = ENTER\r\n\r\ndef add_two():\r\n global ENTER\r\n if ENTER == '0':\r\n ENTER = '2'\r\n else:\r\n ENTER = ENTER + '2'\r\n screen['text'] = ENTER\r\n\r\ndef add_three():\r\n global ENTER\r\n if ENTER == '0':\r\n ENTER = '3'\r\n else:\r\n ENTER = ENTER + '3'\r\n screen['text'] = ENTER\r\n\r\ndef add_four():\r\n global ENTER\r\n if ENTER == '0':\r\n ENTER = '4'\r\n else:\r\n ENTER = ENTER + '4'\r\n screen['text'] = ENTER\r\n\r\ndef add_five():\r\n global ENTER\r\n if ENTER == '0':\r\n ENTER = '5'\r\n else:\r\n ENTER = ENTER + '5'\r\n screen['text'] = ENTER\r\n\r\ndef add_six():\r\n global ENTER\r\n if ENTER == '0':\r\n ENTER = '6'\r\n else:\r\n ENTER = ENTER + '6'\r\n screen['text'] = ENTER\r\n\r\ndef add_seven():\r\n global ENTER\r\n if ENTER == '0':\r\n ENTER = '7'\r\n else:\r\n ENTER = ENTER + '7'\r\n screen['text'] = ENTER\r\n\r\ndef add_eight():\r\n global ENTER\r\n if ENTER == '0':\r\n ENTER = '8'\r\n else:\r\n ENTER = ENTER + '8'\r\n screen['text'] = ENTER\r\n\r\ndef add_nine():\r\n global ENTER\r\n if ENTER == '0':\r\n ENTER = '9'\r\n else:\r\n ENTER = ENTER + '9'\r\n screen['text'] = ENTER\r\n\r\nwindow = Tk()\r\nwindow.config(padx=25,pady=25)\r\n\r\nscreen = Label(text=0,bg='black',fg='white',width=24,height=3,anchor='w')\r\nscreen.grid(column=0,row=0,columnspan=4)\r\n\r\nc_button = Button(text='C',bg='white',fg='black',width=17,command=clear)\r\nc_button.grid(column=0,row=1,columnspan=3)\r\nplus_button = Button(text='+',bg='white',fg='black',width=5,command=plus)\r\nplus_button.grid(column=3,row=1)\r\nsubtract_button = Button(text='-',bg='white',fg='black',width=5,command=subtract)\r\nsubtract_button.grid(column=3,row=2)\r\nmultiply_button = Button(text='×',bg='white',fg='black',width=5,command=multiply)\r\nmultiply_button.grid(column=3,row=3)\r\ndivide_button = Button(text='÷',bg='white',fg='black',width=5,command=divide)\r\ndivide_button.grid(column=3,row=4)\r\nequal_button = Button(text='=',bg='white',fg='black',width=5,command=equal)\r\nequal_button.grid(column=3,row=5)\r\ndot_button = Button(text='.',bg='white',fg='black',width=5,command=add_dot)\r\ndot_button.grid(column=2,row=5)\r\none_button = 
Button(text='1',bg='white',fg='black',width=5,command=add_one)\r\none_button.grid(column=0,row=4)\r\ntwo_button = Button(text='2',bg='white',fg='black',width=5,command=add_two)\r\ntwo_button.grid(column=1,row=4)\r\nthree_button = Button(text='3',bg='white',fg='black',width=5,command=add_three)\r\nthree_button.grid(column=2,row=4)\r\nfour_button = Button(text='4',bg='white',fg='black',width=5,command=add_four)\r\nfour_button.grid(column=0,row=3)\r\nfive_button = Button(text='5',bg='white',fg='black',width=5,command=add_five)\r\nfive_button.grid(column=1,row=3)\r\nsix_button = Button(text='6',bg='white',fg='black',width=5,command=add_six)\r\nsix_button.grid(column=2,row=3)\r\nseven_button = Button(text='7',bg='white',fg='black',width=5,command=add_seven)\r\nseven_button.grid(column=0,row=2)\r\neight_button = Button(text='8',bg='white',fg='black',width=5,command=add_eight)\r\neight_button.grid(column=1,row=2)\r\nnine_button = Button(text='9',bg='white',fg='black',width=5,command=add_nine)\r\nnine_button.grid(column=2,row=2)\r\nzero_button = Button(text='0',bg='white',fg='black',width=11,command=add_zero)\r\nzero_button.grid(column=0,row=5,columnspan=2)\r\n\r\nwindow.mainloop()","sub_path":"my_calculator.py","file_name":"my_calculator.py","file_ext":"py","file_size_in_byte":5609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"375227151","text":"#!/usr/bin/python\n#\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nfrom unittest.mock import patch\n\nfrom google.datacatalog_connectors.mysql_ import datacatalog_cli\n\n\n@patch('google.datacatalog_connectors.rdbms.sync.'\n 'datacatalog_synchronizer.DataCatalogSynchronizer.__init__',\n lambda self, **kargs: None)\nclass DatacatalogCLITestCase(unittest.TestCase):\n\n @patch('argparse.ArgumentParser.parse_args')\n @patch('argparse.ArgumentParser.add_argument')\n @patch('google.datacatalog_connectors.rdbms.sync.'\n 'datacatalog_synchronizer.DataCatalogSynchronizer.run')\n def test_datacatalog_cli_run_should_not_raise_error(\n self, run, add_argument, parse_args): # noqa\n mocked_parse_args = DictWithAttributeAccess()\n\n mocked_parse_args.service_account_path = 'service_account.json'\n\n mocked_parse_args.datacatalog_project_id = 'test_project_id'\n mocked_parse_args.datacatalog_location_id = 'location_id'\n mocked_parse_args.datacatalog_entry_group_id = 'entry_group_id'\n mocked_parse_args.datacatalog_entry_resource_url_prefix =\\\n 'user_defined_host'\n mocked_parse_args.mysql_host = 'host'\n mocked_parse_args.mysql_user = 'user'\n mocked_parse_args.mysql_pass = 'pass'\n mocked_parse_args.mysql_database = 'db'\n mocked_parse_args.raw_metadata_csv = 'csv'\n mocked_parse_args.enable_monitoring = True\n\n parse_args.return_value = mocked_parse_args\n\n datacatalog_cli.MySQL2DatacatalogCli().run({})\n\n for call_arg in add_argument.call_args_list:\n arg = call_arg[0]\n command = arg[0]\n # Verify args which should not contain the required attribute\n 
if '--service-account-path' in command \\\n or '--mysql-user' in command \\\n or '--mysql-pass' in command \\\n or '--mysql-database' in command \\\n or '--datacatalog-entry-resource-url-prefix' in command \\\n or '--raw-metadata-csv' in command \\\n or '--enable-monitoring' in command \\\n or '--datacatalog-entry-group-id' in command:\n params = call_arg[1]\n required = params.get('required')\n self.assertFalse(required)\n elif '-h' not in command:\n params = call_arg[1]\n required = params['required']\n self.assertTrue(required)\n\n self.assertEqual(run.call_count, 1)\n\n\nclass DictWithAttributeAccess(dict):\n\n def __getattr__(self, key):\n return self[key]\n\n def __setattr__(self, key, value):\n self[key] = value\n","sub_path":"google-datacatalog-mysql-connector/tests/google/datacatalog_connectors/mysql_/datacatalog_cli_test.py","file_name":"datacatalog_cli_test.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"59926382","text":"from hypergan.gan_component import GANComponent\nimport numpy as np\nimport tensorflow as tf\n\nclass BaseLoss(GANComponent):\n def __init__(self, gan, config, discriminator=None, generator=None, x=None, split=2, d_fake=None, d_real=None, reuse=False, name=\"BaseLoss\"):\n self.sample = None\n self.ops = None\n self.reuse=reuse\n self.x = x\n self.d_fake = d_fake\n self.d_real = d_real\n self.discriminator = discriminator or gan.discriminator\n self.generator = generator\n self.split = split\n GANComponent.__init__(self, gan, config, name=name)\n\n def reuse(self, d_real=None, d_fake=None):\n self.discriminator.ops.reuse()\n net = self._create(d_real, d_fake)\n self.discriminator.ops.stop_reuse()\n return net\n\n\n def create(self):\n gan = self.gan\n config = self.config\n ops = self.gan.ops\n split = self.split\n d_real = self.d_real\n d_fake = self.d_fake\n\n d_loss = None\n g_loss = None\n if d_real is None or d_fake is None:\n # Not passed in, lets populate d_real/d_fake\n\n net = self.discriminator.sample\n\n ds = self.split_batch(net, split)\n d_real = ds[0]\n d_fake = tf.add_n(ds[1:])/(len(ds)-1)\n d_loss, g_loss = self._create(d_real, d_fake)\n else:\n d_loss, g_loss = self._create(d_real, d_fake)\n\n d_regularizers = []\n g_regularizers = []\n d_loss_features = d_loss\n g_loss_features = g_loss\n self.d_loss_features = d_loss_features\n self.g_loss_features = g_loss_features\n\n if config.random_penalty:\n gp = self.random_penalty(d_fake, d_real)\n d_regularizers.append(gp)\n self.add_metric('random_penalty', ops.squash(gp, tf.reduce_mean))\n\n if self.gan.config.infogan and not hasattr(self.gan, 'infogan_q'):\n sample = self.gan.generator.sample\n d = self.gan.create_component(self.gan.config.discriminator, name=\"discriminator\", input=sample, reuse=True, features=[tf.zeros([1,16,16,256])])\n last_layer = d.controls['infogan']\n q = self.gan.create_component(self.gan.config.infogan, input=(self.gan.discriminator.controls['infogan']), name='infogan')\n self.gan.infogan_q=q\n std_cont = tf.sqrt(tf.exp(q.sample))\n true = self.gan.uniform_distribution.z\n mean = tf.reshape(q.sample, self.ops.shape(true))\n std_cont = tf.reshape(std_cont, self.ops.shape(true))\n eps = (true - mean) / (std_cont + 1e-8)\n continuous = -tf.reduce_mean( -0.5 * np.log(2*np.pi)- tf.log(std_cont+1e-8)*tf.square(eps), reduction_indices=1)\n if self.gan.config.infogan.flipped:\n continuous = -continuous\n\n self.metrics['cinfo']=ops.squash(continuous)\n 
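# For reference, InfoGAN's continuous-code loss is usually the Gaussian negative
# log-likelihood: with eps = (true - mean) / sigma, per latent dimension
#     -log N(z; mean, sigma) = 0.5*log(2*pi) + log(sigma) + 0.5*eps**2,
# and penalising it ties the q-head back to the sampled continuous codes (the
# mutual-information term). The expression computed just above is this
# implementation's variant of that term.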
d_regularizers.append(continuous)\n\n d_regularizers += self.d_regularizers()\n g_regularizers += self.g_regularizers()\n\n print(\"prereg\", d_loss)\n if len(d_regularizers) > 0:\n d_loss += tf.add_n(d_regularizers)\n if len(g_regularizers) > 0:\n g_loss += tf.add_n(g_regularizers)\n\n d_loss = ops.squash(d_loss, config.reduce or tf.reduce_mean) #linear doesn't work with this\n\n # TODO: Why are we squashing before gradient penalty?\n self.add_metric('d_loss', d_loss)\n if g_loss is not None:\n g_loss = ops.squash(g_loss, config.reduce or tf.reduce_mean)\n self.add_metric('g_loss', g_loss)\n\n self.sample = [d_loss, g_loss]\n self.d_loss = d_loss\n self.g_loss = g_loss\n self.d_fake = d_fake\n self.d_real = d_real\n\n return self.sample\n\n def d_regularizers(self):\n return []\n\n def g_regularizers(self):\n return []\n\n def rothk_penalty(self, d_real, d_fake):\n config = self.config\n g_sample = self.gan.uniform_sample\n x = self.gan.inputs.x\n gradx = tf.gradients(d_real, [x])[0]\n gradg = tf.gradients(d_fake, [g_sample])[0]\n gradx = tf.reshape(gradx, [self.ops.shape(gradx)[0], -1])\n gradg = tf.reshape(gradg, [self.ops.shape(gradg)[0], -1])\n gradx_norm = tf.norm(gradx, axis=1, keep_dims=True)\n gradg_norm = tf.norm(gradg, axis=1, keep_dims=True)\n if int(gradx_norm.get_shape()[0]) != int(d_real.get_shape()[0]):\n print(\"Condensing along batch for rothk\")\n gradx_norm = tf.reduce_mean(gradx_norm, axis=0)\n gradg_norm = tf.reduce_mean(gradg_norm, axis=0)\n gradx = tf.square(gradx_norm) * tf.square(1-tf.nn.sigmoid(d_real))\n gradg = tf.square(gradg_norm) * tf.square(tf.nn.sigmoid(d_fake))\n loss = gradx + gradg\n loss *= config.rothk_lambda or 1\n if config.rothk_decay:\n decay_function = config.decay_function or tf.train.exponential_decay\n decay_steps = config.decay_steps or 50000\n decay_rate = config.decay_rate or 0.9\n decay_staircase = config.decay_staircase or False\n global_step = tf.train.get_global_step()\n loss = decay_function(loss, global_step, decay_steps, decay_rate, decay_staircase)\n\n return loss\n\n def random_penalty(self, d_fake, d_real):\n config = self.config\n gan = self.gan\n ops = self.gan.ops\n gradient_penalty = config.gradient_penalty\n x = self.x \n if x is None:\n x=gan.inputs.x\n shape = [1 for t in ops.shape(x)]\n shape[0] = gan.batch_size()\n uniform_noise = tf.random_uniform(shape=shape,minval=0.,maxval=1.)\n mask = tf.cast(tf.greater(0.5, uniform_noise), tf.float32)\n #interpolates = x * mask + g * (1-mask)\n d = d_fake *(1-mask) + d_real * mask#discriminator.reuse(interpolates)\n offset = config.random_penalty_offset or -0.8\n penalty = tf.square(d - offset)\n return penalty\n\n\n def sigmoid_kl_with_logits(self, logits, targets):\n # broadcasts the same target value across the whole batch\n # this is implemented so awkwardly because tensorflow lacks an x log x op\n assert isinstance(targets, float)\n if targets in [0., 1.]:\n entropy = 0.\n else:\n entropy = - targets * np.log(targets) - (1. - targets) * np.log(1. 
- targets)\n return tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=tf.ones_like(logits) * targets) - entropy\n","sub_path":"hypergan/losses/base_loss.py","file_name":"base_loss.py","file_ext":"py","file_size_in_byte":6518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"163914262","text":"# -*- coding: utf-8 -*-\n\n\nfrom __future__ import print_function\n\nimport os\nimport pandas as pd\nfrom shutil import copyfile\n\ndef checkDir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)\n \ndef dataSplitter(validateFactor, testFactor, dataSamples, table):\n train = table.iloc[:int(validateFactor*dataSamples),:]\n validate = table.iloc[int(validateFactor*dataSamples):int(testFactor*dataSamples),:]\n test = table.iloc[int(testFactor*dataSamples):,:]\n return train, validate, test\n\ndef makeClassSubdir(rootDir, classes):\n for classDir in classes:\n dirToMake = os.path.join(rootDir,classDir)\n if not os.path.exists(dirToMake):\n os.makedirs(dirToMake)\n\ndef sortImages(df, rootDir, fraction):\n for iPath in range(len(df['lifeSpanDuration'])):\n #src = os.path.join(df['path'].iloc[iPath],df['file'].iloc[iPath])\n src = os.path.join(df['path'].iloc[iPath],df['file'].iloc[iPath].replace('_small',''))\n dst = os.path.join(rootDir, fraction, df['lifeSpanDuration'].iloc[iPath], df['file'].iloc[iPath].replace(\" \", \"__\"))\n print(\"copy:\\n\"+src+\"\\n to:\\n \"+dst)\n copyfile(src, dst)\n\nsplitFractionTrainValidate = 0.7\nsplitFractionValidateTest = 0.9\nmodelDir = \"/home/evgeny/work/!DeepLongevity/Models\"\ndataDir = \"/home/evgeny/work/!DeepLongevity/day1_data_fullres\"\ncheckDir(dataDir)\ntrainDir = os.path.join(dataDir,\"train\")\ncheckDir(trainDir)\nvalidateDir = os.path.join(dataDir,\"validate\")\ncheckDir(validateDir)\ntestDir = os.path.join(dataDir,\"test\")\ncheckDir(testDir)\n\n\n\n# load data table\ndataFile = os.path.join(modelDir,\"data_mid_point_1days_lables.csv\")\ndataTable = pd.read_table(dataFile, delimiter=\",\")\nclassNames = dataTable['lifeSpanDuration'].unique().tolist()\n\nmakeClassSubdir(trainDir,classNames)\nmakeClassSubdir(validateDir,classNames)\nmakeClassSubdir(testDir,classNames)\n# Split data table\nlongLength = len(dataTable.loc[dataTable['lifeSpanDuration'] == 'long', 'lifeSpanHours'])\nmediumLength = len(dataTable.loc[dataTable['lifeSpanDuration'] == 'medium', 'lifeSpanHours'])\nshortLength = len(dataTable.loc[dataTable['lifeSpanDuration'] == 'short', 'lifeSpanHours'])\n\nlongData = dataTable.loc[dataTable['lifeSpanDuration'] == 'long', ('lifeSpanDuration', 'path', 'file') ]\nmediumData = dataTable.loc[dataTable['lifeSpanDuration'] == 'medium', ('lifeSpanDuration', 'path', 'file') ]\nshortData = dataTable.loc[dataTable['lifeSpanDuration'] == 'short', ('lifeSpanDuration', 'path', 'file') ]\n\nlongTrainData, longValidateData, longTestData = dataSplitter(splitFractionTrainValidate, splitFractionValidateTest, longLength, longData)\nsortImages(longTrainData, dataDir, 'train')\nsortImages(longValidateData, dataDir, 'validate')\nsortImages(longTestData, dataDir, 'test')\n\nmediumTrainData, mediumValidateData, mediumTestData = dataSplitter(splitFractionTrainValidate, splitFractionValidateTest, mediumLength, mediumData)\nsortImages(mediumTrainData, dataDir, 'train')\nsortImages(mediumValidateData, dataDir, 'validate')\nsortImages(mediumTestData, dataDir, 'test')\n\nshortTrainData, shortValidateData, shortTestData = dataSplitter(splitFractionTrainValidate, splitFractionValidateTest, 
shortLength, shortData)\nsortImages(shortTrainData, dataDir, 'train')\nsortImages(shortValidateData, dataDir, 'validate')\nsortImages(shortTestData, dataDir, 'test')\n\n\n#trainData = pd.concat([longTrainData, mediumTrainData, shortTrainData], ignore_index=True, join='outer').sample(frac=1).reset_index(drop=True)\n#trainData = pd.concat([trainData, pd.get_dummies(trainData['lifeSpanDuration'])], axis=1)\n#validateData = pd.append([longValidateData, mediumValidateData, shortValidateData]).sample(frac=1).reset_index(drop=True)\n#validateData = pd.concat([validateData, pd.get_dummies(validateData['lifeSpanDuration'])], axis=1)\n#testData = pd.append([longTestData, mediumTestData, shortTestData]).sample(frac=1).reset_index(drop=True)\n#testData = pd.concat([testData, pd.get_dummies(testData['lifeSpanDuration'])], axis=1)","sub_path":"wormSorter.py","file_name":"wormSorter.py","file_ext":"py","file_size_in_byte":4002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"594937131","text":"import sys\nimport math\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\n\n\nclass Createmenudemo(QMainWindow):\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.setWindowTitle(\"Menu demo\")\n self.resize(300, 200)\n \n\n bar=self.menuBar()\n menu=bar.addMenu(\"File\")\n open=menu.addAction(\"Open\")\n save=QAction(\"Save\",self)\n save.setShortcut(\"Ctrl+s\")\n menu.addAction(save)\n\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n main = Createmenudemo()\n main.show()\n sys.exit(app.exec_())\n","sub_path":"Menu/Createmenu.py","file_name":"Createmenu.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"25295407","text":"import rclpy\nfrom rclpy.node import Node\nfrom rclpy.duration import Duration\nimport tf2_ros\nimport numpy as np\nimport geometry_msgs\nfrom geometry_msgs.msg import Quaternion, Twist, Point\nfrom std_msgs.msg import Float32\nfrom rclpy.qos import qos_profile_system_default\n\nfrom numpy import cos, sin, arctan, arctan2, pi, cross, hstack, array, log, sign\nfrom numpy.linalg import det, norm\n\n\nclass controller(Node):\n def __init__(self):\n super().__init__(\"controller\")\n\n self.tfBuffer = tf2_ros.Buffer()\n self.listener = tf2_ros.TransformListener(self.tfBuffer, self)\n\n self.pub_cmd = self.create_publisher(Twist, \"/cmd_vel\", qos_profile_system_default)\n self.create_subscription(Point, \"/pointA\", self.getA, qos_profile_system_default)\n self.create_subscription(Point, \"/pointB\", self.getB, qos_profile_system_default)\n self.create_subscription(Float32, \"/vel\", self.getVel, qos_profile_system_default)\n\n self.a = np.array([[-3.5, 13]]).T\n self.b = np.array([[5, 13]]).T\n \n self.vel = 0\n\n timer_period = 0.5 # seconds\n self.timer = self.create_timer(timer_period, self.getTransform)\n\n\n def sawtooth(self, x):\n return (x+pi) % (2*pi)-pi\n \n def getVel(self, msg):\n self.vel = msg.data\n\n def getA(self, msg):\n self.a = np.array([[msg.x, msg.y]]).T\n \n def getB(self, msg):\n self.b = np.array([[msg.x, msg.y]]).T\n\n\n def control(self, x, y, yaw, a, b):\n r = .5\n m = np.array([[x, y]]).T\n phi = arctan2(b[1, 0]-a[1, 0], b[0, 0]-a[0, 0])\n\n # ------------ Add cross-track error to the heading --------\n ke = 0.5\n e = det(hstack((b-a, m-a)))/norm(b-a)\n thetaBar = phi - ke*arctan(e/r)\n return self.sawtooth(thetaBar - yaw)\n\n def getTransform(self):\n 
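# For reference, control() above is a standard line-following guidance law:
# e = det([b - a, m - a]) / ||b - a|| is the signed cross-track distance of the
# robot m from the line a -> b, phi is the line bearing, and the target heading
# thetaBar = phi - ke * arctan(e / r) bends back toward the line, saturating at
# +/- ke * pi / 2 for large offsets; sawtooth() wraps the heading error into
# [-pi, pi). With e = 0 the commanded heading is simply the line bearing phi.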
try:\n #transReal = self.tfBuffer.lookup_transform('odom', 'body', self.get_clock().now(), timeout=Duration(seconds=1.0))\n transform = self.tfBuffer.lookup_transform('odom', 'base_link', tf2_ros.Time())\n q = transform.transform.rotation\n siny_cosp = 2 * (q.w * q.z + q.x * q.y)\n cosy_cosp = 1 - 2 * (q.y * q.y + q.z * q.z)\n yaw = arctan2(siny_cosp, cosy_cosp)\n\n u = self.control(transform.transform.translation.x, transform.transform.translation.y, yaw, self.a, self.b)\n\n m = Twist()\n m.angular.z = u\n m.linear.x = float(self.vel)\n\n self.pub_cmd.publish(m)\n\n except Exception as e:\n self.get_logger().error(str(e))\n\n\n \n\ndef main(args=None):\n rclpy.init(args=args)\n node = controller()\n \n rclpy.spin(node)\n\n # Destroy the node explicitly\n # (optional - otherwise it will be done automatically\n # when the garbage collector destroys the node object)\n node.destroy_node()\n rclpy.shutdown()\n\nif __name__ == '__main__':\n main()","sub_path":"crabe_controller/crabe_controller/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"20665112","text":"# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n def __repr__(self):\n return ''.join([self.__class__.__name__, '(', str(self.val), ')'])\n\nfrom collections import deque\ndef construct_tree(data):\n q = deque()\n root = TreeNode(data[0])\n q.append(root)\n i = 1\n while i < len(data):\n par = q.popleft()\n if data[i] is not None:\n par.left = TreeNode(data[i])\n i += 1\n if i < len(data) and data[i] is not None:\n par.right = TreeNode(data[i])\n i += 1\n if par.left is not None:\n q.append(par.left)\n if par.right is not None:\n q.append(par.right)\n return root\n","sub_path":"serialize.py","file_name":"serialize.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"529279717","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 14 08:47:04 2018\n\n@author: Daniel Wehner\n\"\"\"\n\n# -----------------------------------------------------------------------------\n# Imports and global settings\n# -----------------------------------------------------------------------------\nimport numpy as np\nfrom numpy import linalg\nnp.random.seed(42)\n\nimport pandas as pd\n\nimport cvxopt\nimport cvxopt.solvers\n\nfrom sklearn.datasets import make_circles\n\nimport pylab as pl\npl.rcParams[\"figure.figsize\"] = (15.0, 12.0)\npl.rcParams.update({\"font.size\": 18})\n \n# -----------------------------------------------------------------------------\n# Kernels\n# -----------------------------------------------------------------------------\ndef linear_kernel(x, y):\n \"\"\"\n Linear kernel. 
Returns the dot product of x and y.\n \n :param x: data point 1\n :param y: data point 2\n :return:\n \"\"\"\n return np.dot(x, y)\n\n\ndef polynomial_kernel(x, y, p=3):\n \"\"\"\n Polynomial kernel.\n \n :param x: data point 1\n :param y: data point 2\n :param p: degree of the polynomial\n :return:\n \"\"\"\n return (1 + np.dot(x, y)) ** p\n\n\ndef gaussian_kernel(x, y, sigma=5.0):\n \"\"\"\n Gaussian (RBF = radial basis function) kernel.\n \n :param x: data point 1\n :param y: data point 2\n :param sigma: standard deviation\n :return:\n \"\"\"\n return np.exp(-linalg.norm(x-y) ** 2 / (2 * (sigma ** 2)))\n\n\ndef laplace_kernel(x, y, sigma=5.0):\n \"\"\"\n Laplace kernel.\n \n :param x: data point 1\n :param y: data point 2\n :param sigma: standard deviation\n :return:\n \"\"\"\n return np.exp(-linalg.norm(x-y) / (2 * (sigma ** 2)))\n\n\n# -----------------------------------------------------------------------------\n# SVM class\n# -----------------------------------------------------------------------------\nclass SVM(object):\n \"\"\"\n SVM class.\n Implements the Support Vector Machines algorithm.\n Can only handle two classes which have to be +1 and -1!\n \"\"\"\n \n \n def __init__(self, kernel=linear_kernel, C=None):\n \"\"\"\n Constructor.\n \n :param kernel: kernel to use (linear, polynomial, gaussian (=rbf), ...)\n :param C: slack (soft margin)\n \"\"\"\n self.kernel = kernel\n self.C = C\n if self.C is not None: self.C = float(self.C)\n\n\n def fit(self, X, y):\n \"\"\"\n Solves the optimization problem and\n calculates the lagrange multipliers.\n \n :param X: predictors/features\n :param y: labels\n \"\"\"\n n_samples, n_features = X.shape\n\n # ---------------------------------------------------------------------\n # Create matrices which are needed by 'cvxopt'\n # ---------------------------------------------------------------------\n # gram matrix\n K = np.zeros((n_samples, n_samples))\n for i in range(n_samples):\n for j in range(n_samples):\n # apply kernel for each pair of i and j\n K[i,j] = self.kernel(X[i], X[j])\n\n P = cvxopt.matrix(np.outer(y, y) * K)\n q = cvxopt.matrix(np.ones(n_samples) * -1)\n A = cvxopt.matrix(y, (1, n_samples), \"d\")\n b = cvxopt.matrix(0.0)\n\n if self.C is None:\n G = cvxopt.matrix(np.diag(np.ones(n_samples) * -1))\n h = cvxopt.matrix(np.zeros(n_samples))\n else:\n tmp1 = np.diag(np.ones(n_samples) * -1)\n tmp2 = np.identity(n_samples)\n G = cvxopt.matrix(np.vstack((tmp1, tmp2)))\n tmp1 = np.zeros(n_samples)\n tmp2 = np.ones(n_samples) * self.C\n h = cvxopt.matrix(np.hstack((tmp1, tmp2)))\n\n # solve qp problem\n solution = cvxopt.solvers.qp(P, q, G, h, A, b)\n\n # lagrange multipliers\n a = np.ravel(solution[\"x\"])\n\n # support vectors have non-zero lagrange multipliers\n sv = a > 1e-5\n ind = np.arange(len(a))[sv]\n self.a = a[sv]\n self.sv = X[sv]\n self.sv_y = y[sv]\n print(\"%d support vectors out of %d points\" % (len(self.a), n_samples))\n\n # intercept\n self.b = 0\n for n in range(len(self.a)):\n self.b += self.sv_y[n]\n self.b -= np.sum(self.a * self.sv_y * K[ind[n],sv])\n self.b /= len(self.a)\n\n # weight vector\n if self.kernel == linear_kernel:\n self.w = np.zeros(n_features)\n for n in range(len(self.a)):\n self.w += self.a[n] * self.sv_y[n] * self.sv[n]\n else:\n self.w = None\n\n\n def project(self, X):\n \"\"\"\n Projects data for prediction.\n (Computes the value of y(x), without the sign function)\n \n :param X: data to be projected\n :return: projection\n \"\"\"\n if self.w is not None:\n return np.dot(X, self.w) + 
self.b\n else:\n y_predict = np.zeros(len(X))\n for i in range(len(X)):\n s = 0\n for a, sv_y, sv in zip(self.a, self.sv_y, self.sv):\n s += a * sv_y * self.kernel(X[i], sv)\n y_predict[i] = s\n return y_predict + self.b\n\n\n def predict(self, X):\n \"\"\"\n Predicts the class of new data instances.\n (Returns the sign of the projection as computes by project(...) method)\n \n :param X: data to be classified\n :return: prediction\n \"\"\"\n return np.sign(self.project(X))\n\n\n# -----------------------------------------------------------------------------\n# Main\n# -----------------------------------------------------------------------------\nif __name__ == \"__main__\":\n \n # -------------------------------------------------------------------------\n # Create data sets\n # -------------------------------------------------------------------------\n def gen_lin_separable_data():\n \"\"\"\n Generates linearly separable data.\n \n :return: data\n \"\"\"\n mean1 = np.array([0, 2])\n mean2 = np.array([2, 0])\n cov = np.array([[0.8, 0.6], [0.6, 0.8]])\n X1 = np.random.multivariate_normal(mean1, cov, 100)\n y1 = np.ones(len(X1))\n X2 = np.random.multivariate_normal(mean2, cov, 100)\n y2 = np.ones(len(X2)) * -1\n return X1, y1, X2, y2\n\n\n def gen_non_lin_separable_data():\n \"\"\"\n Generates non-linearly separable data.\n \n :return: data\n \"\"\"\n mean1 = [-1, 2]\n mean2 = [1, -1]\n mean3 = [4, -4]\n mean4 = [-4, 4]\n cov = [[1.0,0.8], [0.8, 1.0]]\n X1 = np.random.multivariate_normal(mean1, cov, 50)\n X1 = np.vstack((X1, np.random.multivariate_normal(mean3, cov, 50)))\n y1 = np.ones(len(X1))\n X2 = np.random.multivariate_normal(mean2, cov, 50)\n X2 = np.vstack((X2, np.random.multivariate_normal(mean4, cov, 50)))\n y2 = np.ones(len(X2)) * -1\n return X1, y1, X2, y2\n\n\n def gen_lin_separable_overlap_data():\n \"\"\"\n Generates linearly separable data with overlap.\n\n :return: data \n \"\"\"\n mean1 = np.array([0, 2])\n mean2 = np.array([2, 0])\n cov = np.array([[1.5, 1.0], [1.0, 1.5]])\n X1 = np.random.multivariate_normal(mean1, cov, 100)\n y1 = np.ones(len(X1))\n X2 = np.random.multivariate_normal(mean2, cov, 100)\n y2 = np.ones(len(X2)) * -1\n return X1, y1, X2, y2\n \n \n def gen_circular_data():\n \"\"\"\n Generates a circular data set.\n \n :return: data\n \"\"\"\n X, y = make_circles(n_samples=400, factor=.3, noise=.05)\n y[np.where(y == 0)] = -1 \n return X, y\n\n\n # -------------------------------------------------------------------------\n # Data splitting\n # -------------------------------------------------------------------------\n def split_train(X1, y1, X2, y2):\n \"\"\"\n Splits training data.\n \n :param X1:\n :param y1:\n :param X2:\n :param y2:\n :return:\n \"\"\"\n X1_train = X1[:90]\n y1_train = y1[:90]\n X2_train = X2[:90]\n y2_train = y2[:90]\n X_train = np.vstack((X1_train, X2_train))\n y_train = np.hstack((y1_train, y2_train))\n return X_train, y_train\n\n\n def split_test(X1, y1, X2, y2):\n \"\"\"\n Splits test data.\n \n :param X1:\n :param y1:\n :param X2:\n :param y2:\n :return:\n \"\"\"\n X1_test = X1[90:]\n y1_test = y1[90:]\n X2_test = X2[90:]\n y2_test = y2[90:]\n X_test = np.vstack((X1_test, X2_test))\n y_test = np.hstack((y1_test, y2_test))\n return X_test, y_test\n\n\n # -------------------------------------------------------------------------\n # Data visualization/plotting\n # -------------------------------------------------------------------------\n def plot_margin(X1_train, X2_train, clf):\n \"\"\"\n Plots the margin.\n \n :param X1_train:\n 
:param X2_train:\n :param clf:\n \"\"\"\n \n def f(x, w, b, c=0):\n # given x, return y such that [x,y] in on the line\n # w.x + b = c\n return (-w[0] * x - b + c) / w[1]\n\n pl.plot(X1_train[:,0], X1_train[:,1], \"ro\")\n pl.plot(X2_train[:,0], X2_train[:,1], \"bo\")\n pl.scatter(clf.sv[:,0], clf.sv[:,1], s=100, c=\"g\")\n\n # w.x + b = 0\n a0 = -4; a1 = f(a0, clf.w, clf.b)\n b0 = 4; b1 = f(b0, clf.w, clf.b)\n pl.plot([a0,b0], [a1,b1], \"k\")\n\n # w.x + b = 1\n a0 = -4; a1 = f(a0, clf.w, clf.b, 1)\n b0 = 4; b1 = f(b0, clf.w, clf.b, 1)\n pl.plot([a0,b0], [a1,b1], \"k--\")\n\n # w.x + b = -1\n a0 = -4; a1 = f(a0, clf.w, clf.b, -1)\n b0 = 4; b1 = f(b0, clf.w, clf.b, -1)\n pl.plot([a0,b0], [a1,b1], \"k--\")\n\n pl.axis(\"tight\")\n pl.show()\n\n\n def plot_contour(X1_train, X2_train, clf):\n \"\"\"\n Plots the contours.\n \n :param X1_train:\n :param X2_train:\n :param clf:\n \"\"\"\n pl.plot(X1_train[:,0], X1_train[:,1], \"ro\")\n pl.plot(X2_train[:,0], X2_train[:,1], \"bo\")\n pl.scatter(clf.sv[:,0], clf.sv[:,1], s=100, c=\"g\")\n \n max_val = np.amax(\n np.append(\n [np.amax(X1_train, axis=0)],\n [np.amax(X2_train, axis=0)], axis=0\n ),\n axis=0\n )\n min_val = np.amin(\n np.append(\n [np.amin(X1_train, axis=0)],\n [np.amin(X2_train, axis=0)], axis=0\n ),\n axis=0)\n\n X1, X2 = np.meshgrid(\n np.linspace(\n min_val[0] - 1, max_val[0] + 1, 50\n ),\n np.linspace(\n min_val[1] - 1, max_val[1] + 1, 50\n )\n )\n X = np.array([[x1, x2] for x1, x2 in zip(np.ravel(X1), np.ravel(X2))])\n Z = clf.project(X).reshape(X1.shape)\n pl.contour(X1, X2, Z, [0.0], colors=\"k\", linewidths=2, origin=\"lower\")\n pl.contour(X1, X2, Z + 1, [0.0], colors=\"grey\",\n linewidths=1.5, linestyles=\"dashed\", origin=\"lower\")\n pl.contour(X1, X2, Z - 1, [0.0], colors=\"grey\",\n linewidths=1.5, linestyles=\"dashed\", origin=\"lower\")\n\n pl.axis(\"tight\")\n pl.show()\n\n\n # -------------------------------------------------------------------------\n # Testing\n # -------------------------------------------------------------------------\n def test_linear():\n \"\"\"\n Tests linearly separable data.\n \"\"\"\n X1, y1, X2, y2 = gen_lin_separable_data()\n X_train, y_train = split_train(X1, y1, X2, y2)\n X_test, y_test = split_test(X1, y1, X2, y2)\n\n clf = SVM()\n clf.fit(X_train, y_train)\n\n y_predict = clf.predict(X_test)\n correct = np.sum(y_predict == y_test)\n print(\"%d out of %d predictions correct\" % (correct, len(y_predict)))\n\n plot_contour(X_train[y_train==1], X_train[y_train==-1], clf)\n\n\n def test_non_linear():\n \"\"\"\n Tests non-linearly separable data.\n \"\"\"\n X1, y1, X2, y2 = gen_non_lin_separable_data()\n X_train, y_train = split_train(X1, y1, X2, y2)\n X_test, y_test = split_test(X1, y1, X2, y2)\n\n clf = SVM(polynomial_kernel)\n clf.fit(X_train, y_train)\n\n y_predict = clf.predict(X_test)\n correct = np.sum(y_predict == y_test)\n print(\"%d out of %d predictions correct\" % (correct, len(y_predict)))\n\n plot_contour(X_train[y_train==1], X_train[y_train==-1], clf)\n\n\n def test_soft():\n \"\"\"\n Tests soft margin with linearly separable but overlapping data.\n \"\"\"\n X1, y1, X2, y2 = gen_lin_separable_overlap_data()\n X_train, y_train = split_train(X1, y1, X2, y2)\n X_test, y_test = split_test(X1, y1, X2, y2)\n\n clf = SVM(C=1000)\n clf.fit(X_train, y_train)\n\n y_predict = clf.predict(X_test)\n correct = np.sum(y_predict == y_test)\n print(\"%d out of %d predictions correct\" % (correct, len(y_predict)))\n\n plot_contour(X_train[y_train==1], X_train[y_train==-1], clf)\n \n \n def 
test_non_linear_circular():\n \"\"\"\n Tests non-linearly separable (circular) data.\n \"\"\"\n X_train, y_train = gen_circular_data()\n\n clf = SVM(polynomial_kernel)\n clf.fit(X_train, y_train)\n\n plot_contour(X_train[y_train==1], X_train[y_train==-1], clf)\n \n \n def test_iris():\n \"\"\"\n Runs SVM on iris data set.\n \"\"\"\n data = pd.read_csv(\"../data/irisTwoClasses.txt\", sep=\",\")\n X_train = np.asarray(data[[\"sepal_length_cm\", \"petal_length_cm\"]])\n y_train = np.asarray(data[\"class\"])\n \n clf = SVM(gaussian_kernel, C=1000)\n clf.fit(X_train, y_train)\n \n plot_contour(X_train[y_train==1], X_train[y_train==-1], clf)\n\n \n # -------------------------------------------------------------------------\n # Run the SVM!\n # -------------------------------------------------------------------------\n test_linear()\n test_non_linear()\n test_soft()\n test_iris()\n test_non_linear_circular()","sub_path":"algorithms/svm/soft_svm_with_kernel.py","file_name":"soft_svm_with_kernel.py","file_ext":"py","file_size_in_byte":14443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"286569666","text":"import unittest\nfrom intrepyd.iec611312py.plcopen import parse_plc_open_file\nfrom intrepyd.iec611312py.parsest import parse_st\nfrom intrepyd.iec611312py.variable import Variable\nfrom intrepyd.iec611312py.stmtprinter import StmtPrinter\nfrom intrepyd.iec611312py.flattener import Flattener\nfrom intrepyd.iec611312py.datatype import Primitive\nfrom . import from_fixture_path\n\nboolType = Primitive('BOOL')\nintType = Primitive('INT')\n\nclass TestSTFlattener(unittest.TestCase):\n def _run_tests(self, program, name2var):\n statements = parse_st(program[0], name2var, {})\n flattener = Flattener()\n flattened_statements = flattener.flatten_stmt_block(statements)\n printer = StmtPrinter()\n printer.processStatements(flattened_statements)\n actual = printer.result\n expected = program[1]\n self.assertEqual(expected, actual)\n\n def test_assignment_1(self):\n name2var = {\n 'In1' : Variable('In1', boolType, Variable.INPUT),\n 'In2' : Variable('In2', boolType, Variable.INPUT),\n 'In3' : Variable('In3', boolType, Variable.INPUT),\n 'Out1' : Variable('Out1', boolType, Variable.OUTPUT)\n }\n program = (\n 'Out1 := In1 AND In2;', 'Out1 := (In1 AND In2);'\n )\n self._run_tests(program, name2var)\n\n def test_assignment_2(self):\n name2var = {\n 'a' : Variable('a', boolType, Variable.LOCAL),\n 'b' : Variable('b', boolType, Variable.LOCAL),\n 'c' : Variable('c', boolType, Variable.LOCAL),\n 'd' : Variable('d', boolType, Variable.LOCAL)\n }\n program = (\n \"\"\"\n a := b;\n c := d;\n \"\"\",\n 'a := b;c := d;'\n )\n self._run_tests(program, name2var)\n\n def test_if_1(self):\n name2var = {\n 'a' : Variable('a', boolType, Variable.LOCAL),\n 'b' : Variable('b', boolType, Variable.LOCAL),\n 'c' : Variable('c', boolType, Variable.LOCAL),\n }\n program = (\n \"\"\"\n IF a THEN\n b := c;\n END_IF;\n \"\"\",\n 'b := ite(a, c, b);'\n )\n self._run_tests(program, name2var)\n\n def test_if_2(self):\n name2var = {\n 'a' : Variable('a', boolType, Variable.LOCAL),\n 'b' : Variable('b', boolType, Variable.LOCAL),\n 'c' : Variable('c', boolType, Variable.LOCAL),\n 'd' : Variable('d', boolType, Variable.LOCAL),\n }\n program = (\n \"\"\"\n IF a THEN\n b := c;\n ELSE\n b := d;\n END_IF;\n \"\"\",\n 'b := ite(a, c, d);'\n )\n self._run_tests(program, name2var)\n\n def test_if_3(self):\n name2var = {\n 'a' : Variable('a', boolType, Variable.LOCAL),\n 'b' : Variable('b', 
boolType, Variable.LOCAL),\n 'c' : Variable('c', boolType, Variable.LOCAL),\n 'd' : Variable('d', boolType, Variable.LOCAL),\n }\n program = (\n \"\"\"\n IF a THEN\n b := c;\n ELSE\n d := c;\n END_IF;\n \"\"\",\n 'b := ite(a, c, b);d := ite(a, d, c);'\n )\n self._run_tests(program, name2var)\n\n def test_if_4(self):\n name2var = {\n 'a' : Variable('a', boolType, Variable.LOCAL),\n 'b' : Variable('b', boolType, Variable.LOCAL),\n 'c' : Variable('c', boolType, Variable.LOCAL),\n }\n program = (\n \"\"\"\n IF a THEN\n IF b THEN\n b := c;\n END_IF;\n END_IF;\n \"\"\",\n 'b := ite(a, ite(b, c, b), b);'\n )\n self._run_tests(program, name2var)\n\n def test_if_5(self):\n name2var = {\n 'a' : Variable('a', boolType, Variable.LOCAL),\n 'b' : Variable('b', boolType, Variable.LOCAL),\n 'c' : Variable('c', boolType, Variable.LOCAL),\n 'd' : Variable('d', boolType, Variable.LOCAL)\n }\n program = (\n \"\"\"\n IF a THEN\n b := c;\n ELSE\n c := b;\n END_IF;\n \"\"\",\n 'b___1 := ite(a, c, b);b := ite(a, c, b);c := ite(a, c, b___1);'\n )\n self._run_tests(program, name2var)\n\n def test_case_1(self):\n name2var = {\n 'a' : Variable('a', intType, Variable.LOCAL),\n 'b' : Variable('b', boolType, Variable.LOCAL)\n }\n program = (\n \"\"\"\n CASE a OF\n 0:\n b := 0;\n 1:\n b := 1;\n ELSE\n b := 2;\n END_CASE;\n \"\"\",\n 'b := ite((a = 0), 0, ite((a = 1), 1, ite((a = a), 2, b)));'\n )\n self._run_tests(program, name2var)\n\n def test_integration_1(self):\n pous = parse_plc_open_file(from_fixture_path('openplc/simple1.xml'))\n self.assertEqual(1, len(pous))\n flattener = Flattener()\n flattened_statements = flattener.flatten_stmt_block(pous[0].statements)\n printer = StmtPrinter()\n printer.processStatements(flattened_statements)\n self.assertEqual('output1 := (local1 + input1);', printer.result)\n\n def test_integration_2(self):\n pous = parse_plc_open_file(from_fixture_path('openplc/if1.xml'))\n self.assertEqual(1, len(pous))\n flattener = Flattener()\n flattened_statements = flattener.flatten_stmt_block(pous[0].statements)\n printer = StmtPrinter()\n printer.processStatements(flattened_statements)\n self.assertEqual('c_is_active_c2_GPCA_SW_Logi := ite((c_is_active_c2_GPCA_SW_Logi = 0), 1, ite((c_is_c2_GPCA_SW_Logical_Arc = 1), 2, 3));',\n printer.result)\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"intrepyd/tests/test_st_flattener.py","file_name":"test_st_flattener.py","file_ext":"py","file_size_in_byte":5945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"206359648","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: edwardahn\n\nThis file implements a ROS node that takes in camera and joint angle\ninformation to spot occlusions to the end effector by the arm.\nInternally, the class OcclusionRenderer is used to render output\nimages.\n\"\"\"\n\nimport message_filters\nimport rospkg\nimport rospy\nfrom cv_bridge import CvBridge\nfrom sensor_msgs.msg import Image, CameraInfo, JointState\n\nfrom OcclusionRenderer import OcclusionRenderer\n\n\nclass OcclusionRendererNode:\n \"\"\"\n This class is a wrapper to the OcclusionRenderer class that\n handles ROS functionality.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize and run node responsible for occlusion rendering.\n \"\"\"\n self.bridge = CvBridge()\n rospy.init_node('occlusion_renderer')\n\n # Instantiate OcclusionRenderer object\n self.pkg_path = rospkg.RosPack().get_path('occlusion_render')\n sawyer_dae = '%s/models/sawyer.dae' % self.pkg_path\n 
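# Build the renderer from the Sawyer mesh; setup_sensor() presumably\n        # initializes the virtual camera that get_rendered_image() reads from.\n        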
self.renderer = OcclusionRenderer(sawyer_dae)\n self.renderer.setup_sensor()\n\n # Publish renders onto topic\n self.publisher = rospy.Publisher(\n '/pose_image/occlusion_render', Image, queue_size=1)\n\n # Register callback subscribing to image and camera info\n image_sub = message_filters.Subscriber(\n '/pose_image/image', Image)\n info_sub = message_filters.Subscriber(\n '/pose_image/camera_info', CameraInfo)\n image_sync = message_filters.TimeSynchronizer(\n [image_sub, info_sub], 1)\n image_sync.registerCallback(self.image_callback)\n\n # Register callback subscribing to joint angles\n rospy.Subscriber('/robot/joint_states', JointState,\n self.joints_callback)\n\n\n def image_callback(self, image, camera_info):\n \"\"\"\n Publish rendering of camera's image view. Note that the\n original image and camera info are not used, but we subscribe\n to them since the rendering must be published at the same\n timestamp as the image.\n\n Args:\n image: Image that the camera actually sees (ground truth)\n camera_info: Camera information (ex. intrinsics)\n \"\"\"\n render = self.renderer.get_rendered_image()\n image_message = self.bridge.cv2_to_imgmsg(render, 'rgb8')\n image_message.header.stamp = image.header.stamp\n self.publisher.publish(image_message)\n\n\n def joints_callback(self, joint_state):\n \"\"\"\n Set new joint angles every time this callback is called.\n\n Args:\n joint_state: A list of joint angles and gripper angle.\n Note that the torso angle (last entry to this list)\n is ignored.\n \"\"\"\n positions = joint_state.position\n joint_angles = [angle for angle in positions[:-1]]\n self.renderer.set_joint_angles(joint_angles)\n\n\nif __name__ == '__main__':\n node = OcclusionRendererNode()\n rospy.spin()\n","sub_path":"src/OcclusionRendererNode.py","file_name":"OcclusionRendererNode.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"263107454","text":"import time, sys\n\ndef printGrid(grid):\n for i in range(9):\n print(grid[i])\n \ndef isPossible(posX, posY, number, grid):\n # Column\n for i in range(9):\n if grid[posX][i] == number:\n return False\n\n # Row\n for i in range(9):\n if grid[i][posY] == number:\n return False\n\n squareX = (posX//3) * 3\n squareY = (posY//3) * 3\n\n # Diagonals\n for i in range(0,3):\n for j in range(0,3):\n if grid[squareX + i][squareY + j] == number:\n return False\n\n return True\n\ndef solve(grid):\n for x in range(9):\n for y in range(9):\n if grid[x][y] == 0:\n for number in range(1,10):\n if isPossible(x,y,number, grid):\n grid[x][y] = number\n solve(grid)\n grid[x][y] = 0\n\n return\n printGrid(grid)\n\ndef create_grid(filename):\n try:\n with open(filename, \"r\") as f:\n lines = f.readlines()\n\n name = lines[0].strip()\n grid = [list(map(int, l.strip())) for l in lines[1:]]\n\n return name, grid\n except:\n print(f\"ERROR: The file {filename} could not be found!\")\n return \"\", []\n\ndef main():\n name, grid = create_grid(sys.argv[1])\n if grid == []: return\n start_time = time.time()\n solve(grid)\n print(\"%s -> %s seconds\" % (name, time.time() - start_time))\n\nif __name__ == \"__main__\":\n main()","sub_path":"sudoku_solver.py","file_name":"sudoku_solver.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"46801579","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport numpy as 
np\nimport torchvision\nimport os\nfrom torchvision import datasets,models,transforms\nimport matplotlib.pyplot as plt\nimport torch.utils.data as Data\nimport time\nfrom torch.optim import lr_scheduler\n\nplt.ion()\n\n\n# load data\n\ndata_transforms = {\n    'wheat_data':transforms.Compose([\n        transforms.ToTensor()\n    ]),\n    'wheat_test':transforms.Compose([\n        transforms.ToTensor()\n    ])\n}\n\ndata_dir = 'data'\nimage_datasets = { x:datasets.ImageFolder(os.path.join(data_dir,x),data_transforms[x]) for x in ['wheat_data','wheat_test'] }\ndataloaders = { x: Data.DataLoader(image_datasets[x],batch_size=4,shuffle=True,num_workers=4) for x in ['wheat_data','wheat_test'] }\ndataset_sizes = { x:len(image_datasets[x]) for x in ['wheat_data','wheat_test'] }\nclass_names = image_datasets['wheat_data'].classes\nuse_gpu = torch.cuda.is_available()\n\ndef imshow(inp,title=None):\n    '''Display an image stored as a Tensor'''\n    inp = inp.numpy().transpose((1,2,0))# reorder channels to HWC\n    plt.imshow(inp)\n    if title is not None:\n        plt.title(title)\n    plt.show()\n\n# one batch of training data\ninputs,classes = next(iter(dataloaders['wheat_data']))\n# arrange the images into a grid\nout = torchvision.utils.make_grid(inputs)\nimshow(out,title=[class_names[x] for x in classes])\n\n\n\ndef train_model(model,criterion,optimizer,scheduler,num_epochs=25):\n    since = time.time()\n    best_acc = 0.0\n    best_model_wts = model.state_dict()\n    for epoch in range(num_epochs):\n        print('Epoch {}/{}'.format(epoch,num_epochs-1))\n        print('-'*10)\n\n        for phase in ['wheat_data','wheat_test']:\n            if phase == 'wheat_data':\n                scheduler.step()\n                model.train(True)# set the model to training mode\n            else:\n                model.train(False)\n            running_loss = 0.0\n            running_corrects = 0\n\n            # iterate over the data\n            for data in dataloaders[phase]:\n                # get the inputs\n                inputs,labels = data\n                # wrap in Variable\n                if use_gpu:\n                    inputs = Variable(inputs.cuda())\n                    labels = Variable(labels.cuda())\n                else:\n                    inputs,labels = Variable(inputs),Variable(labels)\n                # zero the parameter gradients\n                optimizer.zero_grad()\n\n                # forward pass\n                outputs = model(inputs)\n                _,preds = torch.max(outputs.data,1)\n                loss = criterion(outputs,labels)\n                # backward pass + optimizer step, only in the training phase\n                if phase == 'wheat_data':\n                    loss.backward()\n                    optimizer.step()\n                # accumulate loss and accuracy over the iterations\n                running_loss += loss.data[0]\n                running_corrects += torch.sum(preds == labels.data)\n            # compute average loss and accuracy for this epoch\n            epoch_loss = running_loss/dataset_sizes[phase]\n            epoch_acc = running_corrects / dataset_sizes[phase]\n            print('{} loss:{:.4f} acc:{:.4f}'.format(phase,epoch_loss,epoch_acc))\n\n            # keep the best model weights\n            if phase == 'wheat_test' and epoch_acc>best_acc:\n                best_acc = epoch_acc\n                best_model_wts = model.state_dict()\n        print()\n\n    time_elapsed = time.time()-since\n    print('training complete in {:.0f}m {:.0f}s'.format(time_elapsed/60,time_elapsed%60))\n    print('best wheat_test acc:{:.4f}'.format(best_acc))\n    model.load_state_dict(best_model_wts)\n    return model\n\n\ndef visualize_model(model,num_images=6):\n    images_so_far = 0\n    fig = plt.figure()\n    for i,data in enumerate(dataloaders['wheat_test']):\n        inputs,labels = data\n        if use_gpu:\n            inputs,labels = Variable(inputs.cuda()),Variable(labels.cuda())\n        else:\n            inputs,labels = Variable(inputs),Variable(labels)\n        outputs = model(inputs)\n        _,preds = torch.max(outputs.data,1)\n        for j in range(inputs.size()[0]):\n            images_so_far += 1\n            ax = plt.subplot(num_images//2,2,images_so_far)\n            ax.axis('off')\n            ax.set_title('predicted:{}'.format(class_names[preds[j]]))\n            imshow(inputs.cpu().data[j])\n            if images_so_far == num_images:\n                return\nmodel_ft = models.resnet18(pretrained=True)\nprint(model_ft)\nnum_ftrs = model_ft.fc.in_features\nmodel_ft.fc = nn.Linear(num_ftrs,3)\nif use_gpu:\n    
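# Move the model to the GPU before the optimizer is built below, so the\n    # optimizer state is created on the same device as the parameters.\n    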
model_ft = model_ft.cuda()\ncriterion = nn.CrossEntropyLoss()\noptimizer_ft = optim.SGD(model_ft.parameters(),lr=0.001,momentum=0.9)\nexp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft,step_size=7,gamma=0.1)\nmodel_ft = train_model(model_ft,criterion,optimizer_ft,exp_lr_scheduler,num_epochs=25)\nvisualize_model(model_ft)\ntorch.save(model_ft.state_dict(), 'ill_resnet18.pkl')","sub_path":"resnet18.py","file_name":"resnet18.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"252807252","text":"from django.db import models\n\nclass Author(models.Model):\n name = models.CharField(\n verbose_name = \"Имя или псевдоним\",\n max_length = 200,\n blank = False, null = False,\n )\n def __str__(self) -> str:\n return self.name\n class Meta:\n verbose_name = \"Автор\"\n verbose_name_plural = \"Авторы\"\n\nclass Series(models.Model):\n name = models.CharField(\n verbose_name = \"Серия книг\",\n max_length = 200,\n blank = True, null = True,\n )\n def __str__(self) -> str:\n return self.name\n class Meta:\n verbose_name = \"Серия\"\n verbose_name_plural = \"Серии\"\n\nclass Genre(models.Model):\n name = models.CharField(\n verbose_name = \"Жанр\",\n max_length = 80,\n blank = False, null = False,\n )\n def __str__(self) -> str:\n return self.name\n class Meta:\n verbose_name = \"Жанр\"\n verbose_name_plural = \"Жанры\"\n\nclass Publisher(models.Model):\n name = models.CharField(\n verbose_name = \"Издатель\",\n max_length = 200,\n blank = True, null = True,\n )\n def __str__(self) -> str:\n return self.name\n class Meta:\n verbose_name = \"Издатель\"\n verbose_name_plural = \"Издатели\"\n\n# Дальше идут сугубо пробные записки\n'''\nclass PriceUnits(models.Model):\n name = models.CharField(\n default = \"usd\",\n verbose_name = \"Название\",\n max_length = 40,\n blank = False, null = False,\n )\n\n def __str__(self) -> str:\n return self.name\n class Meta:\n verbose_name = \"Валюта\",\n verbose_name_plural = \"Валюты\",\n\nclass Book(models.Model):\n name = models.CharField(\n verbose_name = \"Название\",\n default = \"default_title\",\n max_length = 100,\n blank = False, null = False,\n )\n # image = models.ImageField()\n price = models.FloatField(\n verbose_name = \"Цена\",\n blank = False, null = False,\n )\n # price_unit = models.ForeignKey(PriceUnits, on_delete = models.PROTECT, related_name = \"products\",)\n description = models.TextField(\n verbose_name = \"Описание\",\n max_length = 800,\n blank = True, null = True,\n )\n created = models.DateTimeField(\n verbose_name = \"Дата внесения\",\n auto_now = False, auto_now_add = True,\n )\n updated = models.DateTimeField(\n verbose_name = \"Дата последнего редактирования\",\n auto_now = True, auto_now_add = False,\n )\n\n def __str__(self) -> str:\n return f\"Товар {self.name}\"\n class Meta:\n verbose_name = \"Товар\"\n verbose_name_plural = \"Товары\"\n'''","sub_path":"src/spravochniki/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"251501040","text":"# Matt Graham\n# ISIT 333 - Lab 7 Part 1\n# 11/10/2021\n\n# TODO\n# run code\n# create a function to list all books\n# make sure it works\n# comment\n# -------------- example output for list\n# Command: list\n#\n# Title: Slaughterhouse Five\n# Author: Kurt Vonnegut\n# Pub Year: 1969\n\n\ndef list_books(book_catalog):\n # iterate through all titles in the catalog\n for title 
in book_catalog:\n # set book to title item\n book = book_catalog[title]\n # print all book items\n print(\"Title: \" + title)\n print(\"Author: \" + book[\"author\"])\n print(\"Pub year: \" + book[\"pubyear\"])\n print()\n\n\ndef show_book(book_catalog):\n # get user's book title\n title = input(\"Title: \")\n # search through book catalog\n if title in book_catalog:\n book = book_catalog[title]\n print(\"Title: \" + title)\n print(\"Author: \" + book[\"author\"])\n print(\"Pub year: \" + book[\"pubyear\"])\n else:\n print(\"Sorry, \" + title + \" doesn't exist in the catalog.\")\n\n\ndef add_edit_book(book_catalog, mode):\n # get user's target book title\n title = input(\"Title: \")\n # if mode is add, check to make sure book does not exist. Otherwise, add the book\n if mode == \"add\" and title in book_catalog:\n print(title + \" already exists in the catalog.\")\n response = input(\"Would you like to edit it? (y/n): \").lower()\n if response != \"y\":\n return\n # if mode is edit, and book does not exist, ask user if they want to add\n elif mode == \"edit\" and title not in book_catalog:\n print(title + \" doesn't exist in the catalog.\")\n response = input(\"Would you like to add it? (y/n): \").lower()\n if response != \"y\":\n return\n\n # get author and pubyear\n author = input(\"Author name: \")\n pubyear = input(\"Publication year: \")\n\n # Create a dictionary for the book data\n book = {\"author\": author,\n \"pubyear\": pubyear}\n\n # Add the book data to the catalog using title as key\n book_catalog[title] = book\n\n\ndef delete_book(book_catalog):\n # get user's desired book title\n title = input(\"Title: \")\n # it match, delete\n if title in book_catalog:\n del book_catalog[title]\n print(title + \" removed from catalog.\")\n else:\n print(title + \" doesn't exist in the catalog.\")\n\n\ndef display_menu():\n print(\"The Book Catalog program\")\n print()\n print(\"COMMAND MENU\")\n print(\"list - Show all book info\")\n print(\"show - Show book info\")\n print(\"add - Add book\")\n print(\"edit - Edit book\")\n print(\"del - Delete book\")\n print(\"exit - Exit program\")\n\n\ndef main():\n display_menu()\n # set definition\n book_catalog = {\n \"Moby Dick\":\n {\"author\": \"Herman Melville\",\n \"pubyear\": \"1851\"},\n \"The Hobbit\":\n {\"author\": \"J. R. R. Tolkien\",\n \"pubyear\": \"1937\"},\n \"Slaughterhouse Five\":\n {\"author\": \"Kurt Vonnegut\",\n \"pubyear\": \"1969\"}\n }\n while True:\n print()\n # get user input\n command = input(\"Command: \").lower()\n if command == \"list\":\n list_books(book_catalog)\n elif command == \"show\":\n show_book(book_catalog)\n elif command == \"add\":\n add_edit_book(book_catalog, mode=\"add\")\n elif command == \"edit\":\n add_edit_book(book_catalog, mode=\"edit\")\n elif command == \"del\":\n delete_book(book_catalog)\n elif command == \"exit\":\n print(\"Bye!\")\n break\n else:\n print(\"Unknown command. 
Please try again.\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"school/21_4_ISIT333/dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"347115820","text":"from setuptools import setup, find_packages\nimport os.path\n\nversion = '1.2'\n\nlong_description = '\\n\\n'.join([\n open('README.txt').read(),\n open(os.path.join('src', 'z3c', 'dependencychecker', 'USAGE.txt')).read(),\n open('TODO.txt').read(),\n open('CHANGES.txt').read(),\n ])\n\nsetup(name='z3c.dependencychecker',\n version=version,\n description=\"\",\n long_description=long_description,\n # Get strings from http://www.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[],\n keywords=[],\n author='The Health Agency',\n author_email='techniek@thehealthagency.com',\n url='http://www.thehealthagency.com',\n license='ZPL',\n package_dir={'': 'src'},\n packages=find_packages('src'),\n namespace_packages=['z3c'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'setuptools',\n ],\n extras_require = {\n 'test': [\n 'z3c.testsetup>=0.3',\n 'zope.testing',\n ],\n },\n entry_points={\n 'console_scripts':\n ['dependencychecker = z3c.dependencychecker.dependencychecker:main'\n ]},\n )\n","sub_path":"z3c.dependencychecker/tags/1.2/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"263052769","text":"\"\"\"\nhttps://contest.yandex.ru/contest/24750/problems/\nTask A : Solution\n\"\"\"\n\nlength = int(input())\nwidth = int(input())\n\nperimeter = 2 * (length + width)\narea = length * width\n\nprint(f\"Периметр: 10\")\nprint(f\"Площадь: 6\")","sub_path":"day1/Lect1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"596069052","text":"from django.conf.urls import include, url\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.homepage, name='Homepage'),\n url(r'^Login/$', views.login, name='login'),\n url(r'^showuser/$', views.show_user,name = 'show_user'),\n url(r'^regist/$', views.regist, name='regist'),\n #url(r'^index/$', views.index, name='index'),\n url(r'^logout/$', views.logout, name='logout'),\n url(r'^create_new_group/$', views.create_new_group, name = 'createnewgroup'),\n url(r'^homepage/$', views.homepage, name = 'homepage'),\n url(r'^group/addme/$', views.group_addme, name='addme'),\n url(r'^group/quit_group/$', views.quit_group, name='quit'),\n url(r'^group/delete_group/$', views.delete_group, name='delete_group'),\n url(r'^group/complete_group/$', views.complete_group, name='complete_group'),\n url(r'^group_detail/(?P[\\d]+)$', views.group_detail,name = 'group_detail'),\n url(r'^groupList/$', views.grouplist,name ='groupList'),\n url(r'^comment/addo/$', views.comment_add_o,name ='comment_add_o'),\n url(r'^comment/add/$', views.comment_add,name ='comment_add'),\n # url(r'^my_group/(?P[\\w]+)$', views.my_group,name = 'my_group'),\n url(r'^my_group/$', views.my_group,name = 'my_group'),\n url(r'^myGroupDetail_organiser/(?P[\\w]+)$', views.group_detail_organiser,name='group_detail_organiser'),\n url(r'^myGroupDetail_member/(?P[\\w]+)$', views.group_detail_member,name='group_detail_member'),\n url(r'^regist/$', views.regist,name='regist'),\n]","sub_path":"books/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"294154946","text":"from datetime import datetime\n\nfrom shapely.geometry import Point\n\n#df = pd.read_csv('PortasPHC.csv', encoding='iso-8859-1', error_bad_lines=False)\nimport csv\n\nmoradas_phc = []\n\nwith open('PortasPHC.csv', 'r', encoding='iso-8859-1') as file:\n reader = csv.reader(file, delimiter=\";\")\n for row in reader:\n row.append(Point(float(row[4].replace(\",\",\".\")), float(row[5].replace(\",\",\".\"))))\n moradas_phc.append(row)\n\n\n#for morada in moradas_phc:\n # print(morada)\nstartTime = datetime.now()\nfor i in range(0, len(moradas_phc), 1):\n for j in range(i + 1, len(moradas_phc), 1):\n if moradas_phc[i][2] == moradas_phc[j][2]:\n 1\n time = datetime.now() - startTime\n print(i, str(time))","sub_path":"moradas.py","file_name":"moradas.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"269955682","text":"import numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom models.model_helpers import ParamsFlattener\nfrom nn.relaxed_bb import RelaxedBetaBernoulli\nfrom torch import nn\nfrom collections import OrderedDict as odict\n\ntry:\n from utils import utils\nexcept ModuleNotFoundError:\n # For execution as a __main__ file\n import sys\n sys.path.append('../')\n from utils import utils\n\n\nC = utils.getCudaManager('default')\n\n\nclass FeatureGenerator(nn.Module):\n \"\"\"This model will generate coordinate-wise features considering taking\n their uncertainty into account. 
MCdropout aids model to sample mutiple number\n of most promising update behavior without additive cost.Each coordinate will\n be passed as a set of intances along the batch dimension.\n Inputs: grad, weight, grad with k decaying weights\n Outputs: feature for each coordinates\n \"\"\"\n\n def __init__(self, hidden_sz=40, batch_std_norm=False, n_timecode=3,\n b1=[0.0, 0.5, 0.9, 0.99, 0.999],\n b2=[0.5, 0.9, 0.99, 0.999, 0.9999],\n drop_rate=0.0):\n super().__init__()\n assert all([isinstance(b, (list, tuple)) for b in (b1, b2)])\n assert len(b1) == len(b2)\n self.input_sz = len(b1) + n_timecode # grad, weight, momentums\n self.hidden_sz = hidden_sz\n self.batch_std_norm = batch_std_norm\n self.b1 = C(torch.tensor(b1))\n self.b2 = C(torch.tensor(b2))\n # self.mb1 = 1 - self.b1\n # self.mb2 = 1 - self.b2\n self.m = None # 1st momentum vector\n self.v = None # 2nd momentum vector\n self.b1_t = 1 # for bais-correction of 1st momentum\n self.b2_t = 1 # for bais-correction of 2nd momentum\n self.drop_rate = drop_rate\n self.linear = nn.Linear(self.input_sz, self.hidden_sz)\n self.nonlinear = nn.ReLU(inplace=True)\n self.eps = 1e-9\n self.t = 0\n self.t_scales = np.linspace(1, np.log(1000) / np.log(10), n_timecode)\n self.t_encoder = lambda t: [np.tanh(3*t / 10**s - 1) for s in self.t_scales]\n\n\n def new(self):\n # reset momentum\n self.m = None\n self.v = None\n self.b1_t = 1\n self.b2_t = 1\n self.t = 0\n return self\n\n def forward(self, g, p=None, n=1, step=True):\n \"\"\"\n Args:\n g(tensor): current gradient (n_params x 1)\n w(tensor): current weight (n_params x 1)\n p(float): for setting dropout probability in runtime.\n By default, use self.drop_rate.\n n(int): number of instances of MC dropout.\n\n Returns:\n x(tensor): representation for step & mask generation\n (n_params x hidden_sz)\n \"\"\"\n g_sq = g**2\n\n if self.m is None:\n # Initialize momentum matrix\n # momentum matrix: m[B, len(decay_values)]\n # should keep track of momentums for each B dimension\n m = g.repeat(1, len(self.b1))\n v = g_sq.repeat(1, len(self.b2))\n else:\n # Dimension will be matched by broadcasting\n m = self.b1 * self.m + (1 - self.b1) * g\n v = self.b2 * self.v + (1 - self.b2) * g_sq\n\n # bias-correction\n b1_t = self.b1_t * self.b1\n b2_t = self.b2_t * self.b2\n m_ = m.div(1 - b1_t)\n v_ = v.div(1 - b2_t)\n\n # Scaling\n # import pdb; pdb.set_trace()\n v_sqrt = v_.sqrt()#.detach()\n scaled_g = g.div(v_sqrt + self.eps) # AdaGrad\n scaled_m = m_.div(v_sqrt + self.eps) # Adam\n\n # Time encoding\n self.t += 1\n t = C(torch.tensor(self.t_encoder(self.t)))\n t = t.repeat(scaled_g.size(0), 1)\n\n # regularization(as in Metz et al.)\n if self.batch_std_norm:\n g = g.div(g.var(0, keepdim=True))\n # g_sq = g_sq.div(g_sq.var(0, keepdim=True))\n w = w.div(w.var(0, keepdim=True))\n\n # import pdb; pdb.set_trace()\n # scaled_g = scaled_g.div(scaled_g.var(0, keepdim=True))\n x = torch.cat([scaled_m, t], 1)#.detach()\n # x = torch.cat([g, g_sq], dim=1)\n # x = torch.cat([g, g_sq], dim=1)\n if step:\n self.m = m\n self.v = v\n self.b1_t = b1_t\n self.b2_t = b2_t\n \"\"\"Submodule inputs:\n x[n_params, 0]: current gradient\n x[n_params, 1]: current weight\n x[n_params, 2:]: momentums (for each decaying values)\n \"\"\"\n # import pdb; pdb.set_trace()\n assert x.size(1) == self.input_sz\n x = self.nonlinear(self.linear(x))\n assert x.size(1) == self.hidden_sz\n\n x = torch.cat([x] * n, dim=0) # coordinate-wise feature\n x = F.dropout(x, p=self.drop_rate if p is None else p)\n return x, v_sqrt\n\n\nclass 
StepGenerator(nn.Module):\n def __init__(self, hidden_sz, drop_rate=0.0, out_temp=1e-2):\n super().__init__()\n print(f'drop_rate: {drop_rate}')\n self.output_sz = 1 # log learning rate, update direction\n self.hidden_sz = hidden_sz\n self.out_temp = out_temp\n self.linear = nn.Linear(self.hidden_sz, self.output_sz)\n # self.lr = C(torch.tensor(1.).log())\n self.drop_rate = drop_rate\n self.tanh = nn.Tanh()\n # self.nonliear = nn.Tanh()\n # NOTE: last nonliearity can be critical (avoid ReLU + exp > 1)\n self.new()\n\n def new(self):\n self.log_lr = C(torch.tensor(1.).log())\n\n def forward(self, x, v_sqrt, n=1, p=None, debug=False):\n \"\"\"\n Args:\n x (tensor): representation for step & mask generation\n (n_params x hidden_sz)\n\n Returns:\n step (tensor): update vector in the parameter space. (n_params x 1)\n \"\"\"\n assert x.size(1) == self.hidden_sz\n\n x = torch.cat([x] * n, dim=0) # coordinate-wise feature\n #x = F.dropout(x, p=self.drop_rate if p is None else p)\n\n x = self.linear(x)\n \"\"\"Submodule outputs:\n y[n_params, 0]: per parameter log learning rate\n y[n_params, 1]: unnormalized update direction\n \"\"\"\n assert x.size(1) == self.output_sz\n out_1 = x[:, 0] # * self.out_temp\n # step = out_1.repeat(n) * 0.1\n step = out_1 * v_sqrt[:,-1].detach().repeat(n)\n # out_2 = x[:, 1] # * self.out_temp # NOTE: normalizing?\n # # out_3 = x[:, 2]\n # # out_3 = out_3.div(out_3.norm(p=2).detach())\n # # import pdb; pdb.set_trace()\n # self.log_lr = self.log_lr.detach() + out_1 * self.out_temp\n # direction = out_2.div(out_2.norm(p=2).detach())\n # step = torch.exp(self.log_lr) * direction\n # step = self.tanh(step) * 0.01\n if debug:\n import pdb; pdb.set_trace()\n step = torch.clamp(step, max=0.01, min=-0.01)\n\n return torch.chunk(step, n)\n\n def forward_with_mask(self, x):\n pass\n\n\nclass MaskGenerator(nn.Module):\n def __init__(self, hidden_sz=32, temp=1e2):\n super().__init__()\n self.hidden_sz = hidden_sz\n self.temp = temp\n self.gamm_g = None\n self.gamm_l = None\n\n self.nonlinear = nn.Tanh()\n self.eyes = None\n self.ones = None\n self.zeros = None\n # self.avg_layer = nn.Linear(hidden_sz, 3) # lamb, gamm_g, gamm_l\n self.out_layer = nn.Linear(hidden_sz, 1)\n self.beta_bern = RelaxedBetaBernoulli()\n self.sigmoid = nn.Sigmoid()\n self.sigmoid_temp = 1e1\n self.gamm_scale = 1e-3\n self.lamb_scale = 1\n self.p_lamb = nn.Parameter(torch.ones(1) * self.lamb_scale)\n self.p_gamm = nn.Parameter(torch.ones(1) * self.gamm_scale)\n\n def detach_lambdas_(self):\n if self.gamm_g is not None:\n self.gamm_g = self.gamm_g.detach()\n if self.gamm_l is not None:\n self.gamm_l = [l.detach() for l in self.gamm_l]\n if self.lamb is not None:\n self.lamb = self.lamb.detach()\n\n def build_block_diag(self, tensors):\n \"\"\"Build block diagonal matrix by aligning input tensors along the digonal\n elements and filling the zero blocks for the rest of area.\n\n Args: list (list of square matrices(torch.tensor))\n\n Returns: tensor (block diagonal matrix)\n \"\"\"\n # left and right marginal zero matrices\n # (to avoid duplicate allocation onto GPU)\n if self.zeros is None:\n blocks = []\n offset = 0\n size = [s.size(0) for s in tensors]\n total_size = sum(size)\n self.zeros = []\n # NOTE: exception handling for different tnesors size\n for i in range(len(tensors)):\n blocks_sub = []\n cur = tensors[i].size(0)\n if i == 0:\n blocks_sub.append(None)\n else:\n blocks_sub.append(C(torch.zeros(cur, offset)))\n offset += tensors[i].size(0)\n if i == len(tensors) - 1:\n blocks_sub.append(None)\n 
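# None marks a zero-width pad: the last block needs no zeros to its right.\n        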
else:\n blocks_sub.append(C(torch.zeros(cur, total_size - offset)))\n self.zeros.append(blocks_sub)\n # concatenate the tensors with left and right block zero matices\n blocks = []\n for i in range(len(tensors)):\n blocks_sub = []\n zeros = self.zeros[i]\n if zeros[0] is not None:\n blocks_sub.append(zeros[0])\n blocks_sub.append(tensors[i])\n if zeros[1] is not None:\n blocks_sub.append(zeros[1])\n blocks.append(torch.cat(blocks_sub, dim=1))\n return torch.cat(blocks, dim=0)\n\n def unstructured(self, x, size):\n out = self.out_layer(x)\n out = torch.sigmoid(out * self.temp).squeeze()\n return out\n\n\n def forward(self, x, size, n=1, debug=False, exclude_bias=False):\n \"\"\"\n Args:\n x (tensor): representation for step & mask generation (batch x hidden_sz)\n size (dict): A dict mapping names of weight matrices to their\n torch.Size(). For example:\n\n {'mat_0': torch.Size([500, 784]), 'bias_0': torch.Size([500]),\n 'mat_1': torch.Size([10, 500]), 'bias_1': torch.Size([10])}\n \"\"\"\n assert isinstance(size, dict)\n assert x.size(1) == self.hidden_sz # if hidden_sz == 32\n\n \"\"\"Set-based feature averaging (Permutation invariant)\"\"\"\n sizes = [s for s in size.values()]\n # [torch.Size([500, 784]), torch.Size([500]),\n # torch.Size([10, 500]), torch.Size([10])]\n split_sizes = [np.prod(s) for s in size.values()] * n\n # [392000, 500, 5000, 10]\n x_set = torch.split(x, split_sizes, dim=0)\n x_set = [x.view(self.hidden_sz, s[0], -1) for x, s in zip(x_set, sizes * n)]\n x_set_sizes = [x_set[i].size() for i in range(len(x_set))]\n # [torch.Size([32, 500, 784]), torch.Size([32, 500]),\n # torch.Size([32, 10, 500]), torch.Size([32, 10])]\n if not exclude_bias:\n # unsqueeze bias to match dim\n max_d = max([len(s) for s in x_set_sizes]) # +1: hidden dimension added\n assert all([(max_d - len(s)) <= 1 for s in x_set_sizes])\n x_set = [x.unsqueeze_(-1) if len(x.size()) < max_d else x for x in x_set]\n # [torch.Size([32, 500, 784]), torch.Size([32, 500, 1]),\n # torch.Size([32, 10, 500]), torch.Size([32, 10, 1])]\n\n # concatenate bias hidden with weight hidden in the same layer\n # bias will be dropped with its corresponding weight column\n # (a) group the parameters layerwisely\n keys = [n for n in size.keys()]\n\n # names = [str(i) + '_' + n for n in names] *\n # ['mat_0', 'bias_0', 'mat_1', 'bias_1']\n set = odict()\n for key, x in zip(keys, x_set):\n id = key.split('_')[1]\n if id in set:\n set[id].append(x)\n else:\n set[id] = [x]\n\n # (b) compute mean over the set\n set = {k: torch.cat(v, dim=-1).mean(-1) for k, v in set.items()}\n else:\n set = {}\n for k, v in size.items():\n if k.split('_')[0] == 'mat':\n set[k.split('_')[1]] = v\n\n\n x_set = torch.cat([s for s in set.values()], dim=1).t()\n\n # non-linearity\n x_set = self.nonlinear(x_set)\n # x_set: [total n of set(channels) x hidden_sz]\n\n out = self.out_layer(x_set)\n out = torch.sigmoid(out * self.temp).squeeze()\n s = [v.size(1) for v in set.values()]\n n = [k for k in set.keys()]\n m = torch.split(out, s, dim=0)\n return {'layer_' + n.split('_')[0]: m for n, m in zip(n, m)}\n # self.a = F.softplus(out[:, 0].clamp(min=-10.))\n # self.b = F.softplus(out[:, 1].clamp(min=-10., max=50.))\n # self.n = [k for k in set.keys()]\n # keys = [k for k in set.keys() if k == '0']\n # self.s = [v.size(1) for v in set.values()]\n #\n # kld = self.beta_bern.compute_kld(self.a, self.b)\n #\n #\n def sample_mask(self):\n mask, pi = self.beta_bern(self.a, self.b)\n m = torch.split(mask, self.s, dim=0)\n return {'layer_' + n.split('_')[0]: m 
for n, m in zip(self.n, m)}\n\n # mask = torch.chunk(mask, n)\n # pi = torch.chunk(pi, n)\n # kld = []\n #\n #\n # out_list = []\n # keys = [k for k in set.keys() if k.split('_')[1] == '0']\n # split_sizes = [set[k].size(0) for k in keys]\n #\n # for m, p, a, b in zip(mask, pi, torch.chunk(a, n), torch.chunk(b, n)):\n # m = torch.split(m, split_sizes, dim=0)\n # p = torch.split(p, split_sizes, dim=0)\n # name = [k for k in set.keys()]\n # out_list.append((\n # {'layer_' + n.split('_')[0]: m for n, m in zip(name, m)},\n # {'layer_' + n.split('_')[0]: p for n, p in zip(name, p)},\n # self.beta_bern.compute_kld(a, b),\n # ))\n #\n # #\n # # mask = torch.split(mask, [s[0] for s in x_set_sizes], dim=0)\n # # name = [n for n in layerwise.keys()]\n # # mask = {'layer_' + n: m for n, m in zip(name, mask)}\n #\n # return out_list\n","sub_path":"nn/maskgen_bnn2.py","file_name":"maskgen_bnn2.py","file_ext":"py","file_size_in_byte":12913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"630532460","text":"from PyQt5 import QtGui,QtCore,QtWidgets,QtSql\nimport sys\n\n \nclass MainUi(QtWidgets.QMainWindow):\n \n def __init__(self):\n super().__init__()\n self.initUi()\n \n # 初始化UI界面\n def initUi(self):\n # 设置窗口标题\n self.setWindowTitle(\"州的先生 - 在PyQt5中使用数据库\")\n # 设置窗口大小\n self.resize(600,400)\n \n # 创建一个窗口部件\n self.widget = QtWidgets.QWidget()\n # 创建一个网格布局\n self.grid_layout = QtWidgets.QGridLayout()\n # 设置窗口部件的布局为网格布局\n self.widget.setLayout(self.grid_layout)\n \n # 创建一个按钮组\n self.group_box = QtWidgets.QGroupBox('数据库按钮')\n self.group_box_layout = QtWidgets.QVBoxLayout()\n self.group_box.setLayout(self.group_box_layout)\n # 创建一个表格部件\n self.table_widget = QtWidgets.QTableView()\n # 将上述两个部件添加到网格布局中\n self.grid_layout.addWidget(self.group_box,0,0)\n self.grid_layout.addWidget(self.table_widget,0,1)\n \n # 创建按钮组的按钮\n self.b_create_db = QtWidgets.QPushButton(\"创建数据库\")\n self.b_create_db.clicked.connect(self.create_db)\n self.b_view_data = QtWidgets.QPushButton(\"浏览数据\")\n self.b_add_row = QtWidgets.QPushButton(\"添加一行\")\n self.b_delete_row = QtWidgets.QPushButton(\"删除一行\")\n self.b_close = QtWidgets.QPushButton(\"退出\")\n self.b_close.clicked.connect(self.close)\n # 添加按钮到按钮组中\n self.group_box_layout.addWidget(self.b_create_db)\n self.group_box_layout.addWidget(self.b_view_data)\n self.group_box_layout.addWidget(self.b_add_row)\n self.group_box_layout.addWidget(self.b_delete_row)\n self.group_box_layout.addWidget(self.b_close)\n \n # 设置UI界面的核心部件\n self.setCentralWidget(self.widget)\n","sub_path":"python/python/learn/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"541753251","text":"class Calculator:\n\tdef __init__(self):\n\t\tself.add_count = 0\n\t\tself.sub_count = 0\n\t\tself.mul_count = 0\n\t\tself.div_count = 0\n\n\tdef add(self, num1, num2):\n\t\tself.add_count += 1\n\n\t\treturn num1 + num2\n\n\tdef sub(self, num1, num2):\n\t\tself.sub_count += 1\n\n\t\treturn num1 - num2\n\n\tdef mul(self, num1, num2):\n\t\tself.mul_count += 1\n\n\t\treturn num1 * num2\n\n\tdef div(self, num1, num2):\n\t\tself.div_count += 1\n\t\treturn num1 / num2\n\n\tdef show_count(self):\n\t\tprint(\"덧셈 : %s\" % self.add_count)\n\t\tprint(\"뺄셈 : %s\" % self.sub_count)\n\t\tprint(\"곱셈 : %s\" % self.mul_count)\n\t\tprint(\"나눗셈 : %s\" % self.div_count)\n\ncal = Calculator()\n\nprint(cal.add(1, 1))\nprint(cal.add(2, 2))\nprint(cal.add(3, 
3))\n\nprint(cal.sub(2, 1))\nprint(cal.sub(3, 1))\n\nprint(cal.mul(2, 2))\n\ncal.show_count()\n","sub_path":"Quiz/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"102254090","text":"import scapy.all as scapy\nimport argparse\nimport pyfiglet\nimport os\nfrom termcolor import colored\ndef parser():\n parse = argparse.ArgumentParser()\n parse.add_argument('-t' , '--target' , dest='target' , help='specify IP range ')\n options = parse.parse_args()\n if not options.target:\n parse.error(' Target has not been specified ')\n return options\n\ndef scan(ip):\n print('---------------------------------------')\n print('Mac Address \\t\\t Vendor \\t IP ')\n print('---------------------------------------')\n scapy.arping(ip)\n\nos.system('cls')\noptions = parser()\nscan(options.target)\n","sub_path":"NetScanner/Network-scanner.py","file_name":"Network-scanner.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"37455585","text":"import c4d # reference Cinema4D's existing library of code, called a \"module\"\n\ndef main(): # Define the main function of the script\n \n\n ActiveObject = doc.GetActiveObject() # Look for the currently selected object\n if ActiveObject == None: return # if there is no object selected, quit \n NextObject = ActiveObject.GetNext() # Look for the object following the current object in the manager\n if NextObject == None: return # if there is no following object, quit\n doc.StartUndo() # Make the following section of code reversible\n doc.AddUndo(c4d.UNDOTYPE_DELETE, ActiveObject) # Make the following removal of the object reversible\n ActiveObject.Remove() # Remove the current object\n doc.AddUndo(c4d.UNDOTYPE_CHANGE,ActiveObject) # Make the following insertion of the object reversible\n ActiveObject.InsertUnder(NextObject) # Insert the removed object as a child of the following object in the manager\n doc.EndUndo() # Marks the end of a range of code that should be reversible\n c4d.EventAdd() # Refresh the scene to update the change\n \nif __name__=='__main__': # These two lines close out the main function. 
This is usually what will be used to end your script.\n main()\n","sub_path":"third_party/NW Script Tools/Move Tools/NW Make Object Child of Next in Manager.py","file_name":"NW Make Object Child of Next in Manager.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"263851119","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\n\n# External Libraries\nimport dj_redis_url\nfrom toolz import keymap\n\n# Project Library\nfrom lindy.core.constants import IN_SECONDS\nfrom lindy.core.settings import log_setting\n\n\ndef patch_cacheops(g):\n REDIS_URL = g.get('REDIS_URL')\n if not REDIS_URL:\n return\n\n log_setting('CACHEOPS', 'is enabled')\n\n g['CACHEOPS_REDIS'] = keymap(str.lower, dj_redis_url.parse(REDIS_URL))\n\n g['INSTALLED_APPS'].append('cacheops')\n\n g['CACHEOPS_DEGRADE_ON_FAILURE'] = True\n\n g['CACHEOPS_DEFAULTS'] = {'timeout': IN_SECONDS.FIFTEEN_MINUTES}\n g['CACHEOPS'] = {\n # Automatically cache any User.objects.get() calls for 15 minutes\n # This includes request.user or post.author access,\n # where Post.author is a foreign key to auth.User\n 'auth.user': {'ops': 'get'},\n 'core.user': {'ops': 'get'},\n\n # Automatically cache all gets and queryset fetches\n # to other django.contrib.auth models for an hour\n 'auth.*': {'ops': ('fetch', 'get'), 'timeout': IN_SECONDS.ONE_HOUR},\n\n # Cache gets, fetches, counts and exists to Permission\n # 'all' is just an alias for ('get', 'fetch', 'count', 'exists')\n 'auth.permission': {'ops': 'all', 'timeout': IN_SECONDS.ONE_HOUR},\n\n # Basically Never changing objects. Allow local_get (in memory)\n 'event.event': {'ops': 'all', 'local_get': True},\n 'ticket.tickettype': {'ops': 'all', 'local_get': True},\n 'ticket.tickettier': {'ops': 'all', 'local_get': True},\n 'ticket.ticketaddontype': {'ops': 'all', 'local_get': False},\n\n # Enable manual caching on all other models with default timeout of an hour\n # Use Post.objects.cache().get(...)\n # or Tags.objects.filter(...).order_by(...).cache()\n # to cache particular ORM request.\n # Invalidation is still automatic\n # '*.*': {'ops': (), 'timeout': IN_SECONDS.ONE_HOUR},\n\n # And since ops is empty by default you can rewrite last line as:\n '*.*': {'timeout': IN_SECONDS.ONE_HOUR},\n }\n","sub_path":"lindy/core/settings/cacheopts.py","file_name":"cacheopts.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"237631894","text":"import os\nimport pandas as pd\nimport re\n\nimport sys\nimport csv\n\nfrom datetime import datetime\nimport tld\nimport math\nimport numpy as np\nimport pickle\nimport wordfreq\nimport string\nfrom collections import defaultdict, Counter\n\ndgaTLD_list = [\"cf\", \"recipes\", \"email\", \"ml\", \"gq\", \"fit\", \"cn\", \"ga\", \"rest\", \"tk\"]\nhmm_add = r\"./static/hmm_matrix.csv\"\ngib_add = r\"./static/gib_model.pki\"\ngramfile_add = r\"./static/n_gram_rank_freq.txt\"\nprivate_tld_file = r\"./static/private_tld.txt\"\nhmm_prob_threshold = -120\n\n\naccepted_chars = 'abcdefghijklmnopqrstuvwxyz '\npos = dict([(char, idx) for idx, char in enumerate(accepted_chars)])\ndef get_name(url):\n\n url = url.strip(string.punctuation)\n try:\n TLD = tld.get_tld(url, as_object=True, fix_protocol=True)\n SLD = tld.get_tld(url, as_object=True, fix_protocol=True).domain\n\n except Exception as e:\n na_list = url.split(\".\")\n TLD = na_list[-1]\n SLD = na_list[-2]\n 
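# Fallback when tld parsing fails: treat the last two dot-separated labels\n    # as TLD and SLD; both paths return plain strings.\n    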
return str(TLD), str(SLD)\n\ndef load_gramdict_privatetld():\n\n rank_dict = dict()\n with open(gramfile_add, 'r') as f:\n for line in f:\n cat, gram, freq, rank = line.strip().split(',')\n rank_dict[gram] = int(rank)\n pritld_list = list()\n with open(private_tld_file, 'r') as f:\n pritld_list = set(line.strip() for line in f)\n return rank_dict, pritld_list\n\ndef cal_ent_gni_cer(SLD):\n\n f_len = float(len(SLD))\n count = Counter(i for i in SLD).most_common() # unigram frequency\n ent = -sum(float(j / f_len) * (math.log(float(j / f_len), 2)) for i, j in count) # shannon entropy\n gni = 1 - sum(float(j / f_len) * float(j / f_len) for i, j in count)\n cer = 1 - max(float(j/ f_len) for i, j in count)\n return ent, gni, cer\n\n\ndef cal_rep_letter(SLD):\n\n count = Counter(i for i in SLD if i.isalpha()).most_common()\n sum_n = 0\n for letter, cnt in count:\n if cnt > 1:\n sum_n += 1\n return sum_n\ndef cal_gib(SLD):\n\n gib_model = pickle.load(open(gib_add, 'rb'))\n mat = gib_model['mat']\n threshold = gib_model['thresh']\n\n log_prob = 0.0\n transition_ct = 0\n SLD = re.sub(\"[^a-z]\", \"\", SLD)\n gram2 = [SLD[i:i + 2] for i in range(len(SLD) - 1)]\n for a, b in gram2:\n log_prob += mat[pos[a]][pos[b]]\n transition_ct += 1\n # The exponentiation translates from log probs to probs.\n prob = math.exp(log_prob / (transition_ct or 1))\n return int(prob > threshold)\n\ndef cal_hmm_prob(url):\n\n hmm_dic = defaultdict(lambda: defaultdict(float))\n with open(hmm_add, 'r') as f:\n for line in f.readlines():\n key1, key2, value = line.rstrip().split('\\t') # key1 can be '' so rstrip() only\n value = float(value)\n hmm_dic[key1][key2] = value\n url = '^' + url.strip('.') + '$'\n gram2 = [url[i:i+2] for i in range(len(url)-1)]\n prob = hmm_dic[''][gram2[0]]\n\n for i in range(len(gram2)-1):\n prob *= hmm_dic[gram2[i]][gram2[i+1]]\n if prob < math.e ** hmm_prob_threshold:\n prob = -999\n return prob\n\n#rf\ndef cal_gram_med(SLD, n):\n grams = [SLD[i:i + n] for i in range(len(SLD) - n+1)]\n fre = list()\n for s in grams:\n fre.append(wordfreq.zipf_frequency(s, 'en'))\n return np.median(fre)\n\ndef cal_rep_cart(SLD):\n count = Counter(i for i in SLD).most_common()\n sum_n = 0\n for letter, cnt in count:\n if cnt > 1:\n sum_n += 1\n return sum_n\n\ndef SVM_get_feature(url):\n gram_rank_dict, private_tld = load_gramdict_privatetld()\n TLD, SLD = get_name(url)\n url = SLD+\".\"+TLD\n url_rm = re.sub(r\"\\.|_|-\", \"\", url)\n TLD_rm = re.sub(r\"\\.|_|-\", \"\", TLD)\n SLD_rm = re.sub(r\"\\.|_|-\", \"\", SLD)\n\n has_private_tld = 0\n for tld in private_tld:\n if tld in url:\n has_private_tld = 1\n name_list = tld.split('.')\n TLD = name_list[-1]\n SLD = name_list[-2]\n\n url = SLD + \".\" + TLD\n\n entropy = cal_ent_gni_cer(TLD)[0]\n\n f_len = float(len(SLD))\n\n ent_flen = entropy / f_len\n\n vowel_ratio = len(re.findall(r\"a|e|i|o|u\", SLD)) / f_len\n\n digit_ratio = len(re.findall(r\"[0-9]\", SLD)) / f_len\n\n repeat_letter = cal_rep_letter(SLD) / f_len\n\n dig_list = re.findall(r\"[0-9]{2,}\", url)\n dig_len = [len(dig) for dig in dig_list]\n consec_digit = sum(dig_len) / f_len\n\n con_list = re.findall(r\"[b|c|d|f|g|h|j|k|l|m|n|p|q|r|s|t|v|w|x|y|z]{2,}\", url)\n con_len = [len(con) for con in con_list]\n consec_consonant = sum(con_len) / f_len\n\n gib_value = cal_gib(SLD)\n\n hmm_log_prob = cal_hmm_prob(SLD)\n\n main_domain = '$' + SLD + '$'\n gram2 = [main_domain[i:i + 2] for i in range(len(main_domain) - 1)]\n gram3 = [main_domain[i:i + 3] for i in range(len(main_domain) - 2)]\n gram1_rank = 
[gram_rank_dict[i] if i in gram_rank_dict else 0 for i in main_domain[1:-1]]\n gram2_rank = [gram_rank_dict[''.join(i)] if ''.join(i) in gram_rank_dict else 0 for i in gram2]\n gram3_rank = [gram_rank_dict[''.join(i)] if ''.join(i) in gram_rank_dict else 0 for i in gram3]\n\n avg_gram1_rank = np.mean(gram1_rank)\n\n avg_gram2_rank = np.mean(gram2_rank)\n\n avg_gram3_rank = np.mean(gram3_rank)\n\n std_gram1_rank = np.std(gram1_rank)\n \n std_gram2_rank = np.std(gram2_rank)\n\n std_gram3_rank = np.std(gram3_rank)\n\n #rf\n domain_len = len(url)\n sld_len = len(SLD)\n tld_len = len(TLD)\n uni_domain = len(set(url_rm))\n uni_sld = len(set(SLD_rm))\n uni_tld = len(set(TLD_rm))\n flag_dga = 0\n for t in dgaTLD_list:\n if t in url:\n flag_dga = 1\n \n flag_dig = 0\n if re.match(\"[0-9]\", url) != None:\n flag_dig = 1\n\n sym = len(re.findall(r\"\\.|_|-\", SLD))/sld_len\n hex = len(re.findall(r\"[0-9]|[a-f]\", SLD))/sld_len\n dig = len(re.findall(r\"[0-9]\", SLD))/sld_len\n vow = len(re.findall(r\"a|e|i|o|u\", SLD))/sld_len\n con = len(re.findall(r\"b|c|d|f|g|h|j|k|l|m|n|p|q|r|s|t|v|w|x|y|z\", SLD))/sld_len\n rep_char_ratio = cal_rep_cart(SLD_rm)/uni_sld\n con_list = re.findall(r\"[b|c|d|f|g|h|j|k|l|m|n|p|q|r|s|t|v|w|x|y|z]{2,}\", url)\n con_len = [len(con) for con in con_list]\n cons_con_ratio = sum(con_len)/domain_len\n dig_list = re.findall(r\"[0-9]{2,}\", url)\n dig_len = [len(dig) for dig in dig_list]\n cons_dig_ratio = sum(dig_len)/domain_len\n tokens_sld = len(SLD.split('-'))\n digits_sld = len(re.findall(r\"[0-9]\", SLD))\n ent, gni, cer = cal_ent_gni_cer(SLD)\n gram2_med = cal_gram_med(SLD, 2)\n gram3_med = cal_gram_med(SLD, 3)\n gram2_cmed = cal_gram_med(SLD+SLD, 2)\n gram3_cmed = cal_gram_med(SLD+SLD, 3)\n\n\n feature = [entropy, f_len, ent_flen, vowel_ratio, digit_ratio, repeat_letter, consec_digit, consec_consonant,\n gib_value,hmm_log_prob, avg_gram1_rank, avg_gram2_rank, avg_gram3_rank, std_gram1_rank, std_gram2_rank,\n std_gram3_rank, has_private_tld,\n domain_len, tld_len, uni_domain, uni_sld, uni_tld, flag_dga, flag_dig, sym, hex, dig, vow,\n con, rep_char_ratio, cons_con_ratio, cons_dig_ratio, tokens_sld, digits_sld, ent, gni, cer, gram2_med,\n gram3_med, gram2_cmed, gram3_cmed]\n #17+24\n return feature\n\n\nexclude = set(['__pycache__'])\nf=[]\nfor root, dirs, filenames in os.walk(os.path.join(os.path.expanduser(\"~\"), \"%s\" % \"mytest\".lower()), topdown=True):\n dirs[:] = [d for d in dirs if d not in exclude]\n print(filenames)\n print(root)\nfor filename in (filenames):\n csv_data = pd.read_csv(root + '/' + filename, names=[\"addr\", \"type\", \"source\"])\n csv_df = pd.DataFrame(csv_data)\n csv_df = csv_df[\"addr\"]\n csv_df.drop_duplicates(inplace=True)\n print(csv_df.shape[0])\n csv_df.reset_index(drop=True, inplace=True)\n\n\n #store as file\n \n fea = open(os.path.join(os.path.expanduser(\"~\"), \"%s\" % \"static\".lower(),'feature.csv'),'a+')\n csv_writer=csv.writer(fea)\n csv_writer.writerow(['1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17',\n '18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42'])\n \n for i in range(csv_df.shape[0]):\n if re.match(r\"\\A\\d+\\.\\d+\\.\\d+\\.\\d+\", csv_df[i]):\n csv_df.drop(index=i, inplace=True)\n continue\n \n try:\n f=SVM_get_feature(csv_df[i])\n except Exception as e:\n print(e)\n continue # skip this row on failure instead of re-writing the previous feature vector\n \n #print(f)\n f.append(0)\n csv_writer.writerow(f)\n\n fea.close()\n\n \n 
print(csv_df.shape[0])\n","sub_path":"data_proc/preproc_3.py","file_name":"preproc_3.py","file_ext":"py","file_size_in_byte":8501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"428557756","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom pymysql import *\nimport sys\nimport os\nimport util\nimport re\nimport sent_email\n\n\nclass Rider(QWidget):\n to_login = pyqtSignal()\n\n def __init__(self, username):\n super(Rider, self).__init__()\n self.connect = util.sql_connect()\n self.cwd = os.getcwd()\n self.cursor = self.connect.cursor()\n self.setFixedSize(900, 1400)\n self.setWindowIcon(QIcon(\"./image/icon.jpg\"))\n self.setWindowTitle(\"饱了没-骑手版\")\n self.username = username\n self.login_palette = QPalette()\n self.login_palette.setColor(self.backgroundRole(), QColor(255, 255, 255)) # 设置背景颜色\n self.setPalette(self.login_palette)\n self.info = None\n self.all_orders = []\n self.info_refresh()\n print(self.info)\n self.dialog = None\n self.all_items = []\n self.INDEX = 0\n self.MAIN = 0\n self.ORDER = 1\n self.PERSONAL = 2\n\n self.main_button = QPushButton(\"主菜单\", self)\n self.main_button.move(0, 1342)\n self.main_button.resize(300, 60)\n self.main_button.setFont(QFont('幼圆', 13))\n self.main_button.setStyleSheet(\"background-color: rgb(51, 153, 255)\")\n self.main_button.clicked.connect(lambda: self.menu_choose(self.MAIN))\n\n self.order_button = QPushButton(\"订单中心\", self)\n self.order_button.move(300, 1342)\n self.order_button.resize(300, 60)\n self.order_button.setFont(QFont('幼圆', 13))\n self.order_button.setStyleSheet(\"background-color: rgb(232, 232, 232)\")\n self.order_button.clicked.connect(lambda: self.menu_choose(self.ORDER))\n\n self.personal_button = QPushButton(\"个人中心\", self)\n self.personal_button.move(600, 1342)\n self.personal_button.resize(302, 60)\n self.personal_button.setFont(QFont('幼圆', 13))\n self.personal_button.setStyleSheet(\"background-color: rgb(232, 232, 232)\")\n self.personal_button.clicked.connect(lambda: self.menu_choose(self.PERSONAL))\n\n self.logo = QLabel(self)\n self.logo.setPixmap(QPixmap(\"./image/logo.png\"))\n self.logo.setScaledContents(True)\n self.logo.resize(500, 200)\n self.logo.move(200, 50)\n\n self.request = QLabel(self)\n self.request.setPixmap(QPixmap(\"./image/request_abstract.jpg\"))\n self.request.setScaledContents(True)\n self.request.resize(430, 430)\n self.request.move(10, 280)\n\n self.request_button = QPushButton(\"接单中心\", self)\n self.request_button.move(10, 710)\n self.request_button.resize(430, 60)\n self.request_button.setFont(QFont('幼圆', 13))\n self.request_button.clicked.connect(self.func_request_button)\n\n self.rank = QLabel(self)\n self.rank.setPixmap(QPixmap(\"./image/rank_abstract.jpg\"))\n self.rank.setScaledContents(True)\n self.rank.resize(430, 430)\n self.rank.move(460, 280)\n\n self.rank_button = QPushButton(\"骑手排行\", self)\n self.rank_button.move(460, 710)\n self.rank_button.resize(430, 60)\n self.rank_button.setFont(QFont('幼圆', 13))\n self.rank_button.clicked.connect(self.func_rank_button)\n\n self.evaluate = QLabel(self)\n self.evaluate.setPixmap(QPixmap(\"./image/evaluate_abstract.jpg\"))\n self.evaluate.setScaledContents(True)\n self.evaluate.resize(430, 430)\n self.evaluate.move(10, 810)\n\n self.evaluate_button = QPushButton(\"我的评价\", self)\n self.evaluate_button.move(10, 1240)\n self.evaluate_button.resize(430, 60)\n self.evaluate_button.setFont(QFont('幼圆', 13))\n 
self.evaluate_button.clicked.connect(self.func_evaluate_button)\n\n self.contract = QLabel(self)\n self.contract.setPixmap(QPixmap(\"./image/merchant_abstract.jpg\"))\n self.contract.setScaledContents(True)\n self.contract.resize(430, 430)\n self.contract.move(460, 810)\n\n self.contract_button = QPushButton(\"签约商家\", self)\n self.contract_button.move(460, 1240)\n self.contract_button.resize(430, 60)\n self.contract_button.setFont(QFont('幼圆', 13))\n self.contract_button.clicked.connect(self.func_contract_button)\n\n self.main_items = []\n self.main_items.append(self.logo)\n self.main_items += [self.request, self.request_button]\n self.main_items += [self.rank, self.rank_button]\n self.main_items += [self.evaluate, self.evaluate_button]\n self.main_items += [self.contract, self.contract_button]\n self.all_items.append(self.main_items)\n\n self.order_logo = QLabel(self)\n self.order_logo.setPixmap(QPixmap(\"./image/order_logo.png\"))\n self.order_logo.setScaledContents(True)\n self.order_logo.resize(550, 200)\n self.order_logo.move(175, 0)\n\n self.order_index = 0\n self.sum_order = len(self.all_orders)\n self.pre_order = QPushButton(\"上一页\", self)\n self.pre_order.resize(160, 50)\n self.pre_order.setFont(QFont('幼圆', 10))\n self.pre_order.move(210, 1287)\n self.pre_order.clicked.connect(lambda: self.change_order(-1))\n\n self.next_order = QPushButton(\"下一页\", self)\n self.next_order.resize(160, 50)\n self.next_order.setFont(QFont('幼圆', 10))\n self.next_order.move(530, 1287)\n self.next_order.clicked.connect(lambda: self.change_order(1))\n\n self.show_order = QPushButton(str(self.order_index + 1), self)\n self.show_order.setStyleSheet(\"background-color: rgb(51, 153, 255)\")\n self.show_order.resize(60, 50)\n self.show_order.setFont(QFont('幼圆', 12))\n self.show_order.move(420, 1287)\n\n self.products = QScrollArea(self)\n self.products.move(0, 530)\n self.products.resize(900, 750)\n\n self.all_product = None\n self.order_items = []\n self.order_items.append(self.pre_order)\n self.order_items.append(self.next_order)\n self.order_items.append(self.show_order)\n self.order_items.append(self.order_logo)\n self.order_items.append(self.products)\n self.all_items.append(self.order_items)\n\n self.plogo = QLabel(self)\n self.plogo.setPixmap(QPixmap(\"./image/logo.png\"))\n self.plogo.setScaledContents(True)\n self.plogo.resize(500, 200)\n self.plogo.move(200, 0)\n\n self.personal_portrait = QLabel(self)\n self.personal_portrait.setPixmap(QPixmap(self.info[6]))\n self.personal_portrait.setScaledContents(True)\n self.personal_portrait.resize(250, 250)\n self.personal_portrait.move(50, 210)\n\n self.change_password = QPushButton(\"修改密码\", self)\n self.change_password.resize(180, 50)\n self.change_password.setFont(QFont(\"宋体\", 12))\n self.change_password.move(650, 215)\n self.change_password.clicked.connect(self.change_password_dialog)\n\n self.upgrade = QPushButton(\"提升等级\", self)\n self.upgrade.resize(180, 50)\n self.upgrade.setFont(QFont(\"宋体\", 12))\n self.upgrade.move(650, 305)\n self.upgrade.clicked.connect(self.upgrade_dialog)\n\n self.change_portrait = QPushButton(\"更改头像\", self)\n self.change_portrait.resize(180, 50)\n self.change_portrait.setFont(QFont('宋体', 12))\n self.change_portrait.move(650, 400)\n self.change_portrait.clicked.connect(self.portrait_file_dialog)\n\n self.change_email = QPushButton(\"更改邮箱\", self)\n self.change_email.resize(180, 50)\n self.change_email.setFont(QFont('宋体', 12))\n self.change_email.move(650, 505)\n 
self.change_email.clicked.connect(self.change_email_dialog)\n\n self.change_phone = QPushButton(\"更改电话\", self)\n self.change_phone.resize(180, 50)\n self.change_phone.setFont(QFont('宋体', 12))\n self.change_phone.move(650, 610)\n self.change_phone.clicked.connect(self.change_phone_dialog)\n\n self.change_address = QPushButton(\"更换地址\", self)\n self.change_address.resize(180, 50)\n self.change_address.setFont(QFont('宋体', 12))\n self.change_address.move(650, 1030)\n self.change_address.clicked.connect(self.change_address_dialog)\n\n self.quit = QPushButton(\"退出登录\", self)\n self.quit.setStyleSheet(\"background-color: rgb(238, 44, 44)\")\n self.quit.resize(300, 60)\n self.quit.setFont(QFont('幼圆', 14))\n self.quit.move(300, 1200)\n self.quit.clicked.connect(self.quit_to_login)\n\n self.personal_items = []\n self.personal_items.append(self.plogo)\n self.personal_items.append(self.change_portrait)\n self.personal_items.append(self.personal_portrait)\n self.personal_items.append(self.upgrade)\n self.personal_items.append(self.change_password)\n self.personal_items.append(self.change_email)\n self.personal_items.append(self.change_phone)\n self.personal_items.append(self.change_address)\n self.personal_items.append(self.quit)\n self.all_items.append(self.personal_items)\n\n self.show_interface(self.MAIN)\n\n def change_email_dialog(self):\n self.dialog = QDialog()\n self.dialog.setFixedSize(500, 160)\n self.dialog.setWindowIcon(QIcon(\"./image/icon.jpg\"))\n\n input_email = QLineEdit(self.dialog)\n input_email.setFont(QFont(\"宋体\", 12))\n input_email.resize(450, 50)\n input_email.setPlaceholderText(\"输入新邮箱\")\n input_email.move(25, 20)\n input_email.setMaxLength(50)\n\n confirm = QPushButton(\"修改邮箱\", self.dialog)\n confirm.move(25, 90)\n confirm.resize(450, 50)\n confirm.setFont(QFont('幼圆', 12))\n confirm.setStyleSheet(\"background-color: rgb(51, 153, 255)\")\n confirm.clicked.connect(\n lambda: self.confirm_change_email(input_email.text()))\n\n self.dialog.setWindowTitle(\"修改邮箱\")\n self.dialog.setWindowModality(Qt.ApplicationModal)\n self.dialog.exec_()\n\n def confirm_change_email(self, email):\n if len(email) == 0:\n QMessageBox.critical(self, \"饱了没\", \"邮箱不能为空!\")\n elif sent_email.email_type(email) == -1:\n QMessageBox.critical(self, \"饱了没\", \"邮箱格式错误!\")\n else:\n sql = \"select count(*) from users where `email` = '%s'\" % email\n self.cursor.execute(sql)\n if self.cursor.fetchone()[0] > 0:\n QMessageBox.critical(self, \"饱了没\", \"邮箱已经被注册!\")\n else:\n sql = \"update users set `email` = '%s' where `username` = '%s'\" % (email, self.info[0])\n try:\n self.cursor.execute(sql)\n self.connect.commit()\n self.info[4] = email\n self.update()\n self.dialog.close()\n except:\n QMessageBox.critical(self, \"饱了没\", \"修改失败!\\n请检查数据库连接!\")\n\n def portrait_file_dialog(self):\n file_name, file_type = QFileDialog.getOpenFileName(self,\n \"选取文件\",\n os.getcwd(), # 起始路径\n \"图像文件 (*.bmp *.jpg *.jpeg *.png)\")\n if file_name != \"\":\n sql = \"update `rider` set `portrait` = '%s' where username = '%s'\" % (file_name, self.info[0])\n try:\n self.cursor.execute(sql)\n self.connect.commit()\n self.personal_portrait.setPixmap(QPixmap(file_name))\n self.info[6] = file_name\n except:\n QMessageBox.critical(self, \"饱了没\", \"上传失败!\\n请检查数据库连接!\")\n\n def change_phone_dialog(self):\n self.dialog = QDialog()\n self.dialog.setFixedSize(500, 160)\n self.dialog.setWindowIcon(QIcon(\"./image/icon.jpg\"))\n\n input_phone = QLineEdit(self.dialog)\n input_phone.setFont(QFont(\"宋体\", 12))\n input_phone.resize(450, 50)\n 
input_phone.setPlaceholderText(\"输入新电话\")\n input_phone.move(25, 20)\n input_phone.setMaxLength(50)\n\n confirm = QPushButton(\"修改电话\", self.dialog)\n confirm.move(25, 90)\n confirm.resize(450, 50)\n confirm.setFont(QFont('幼圆', 12))\n confirm.setStyleSheet(\"background-color: rgb(51, 153, 255)\")\n confirm.clicked.connect(\n lambda: self.confirm_change_phone(input_phone.text()))\n\n self.dialog.setWindowTitle(\"修改电话\")\n self.dialog.setWindowModality(Qt.ApplicationModal)\n self.dialog.exec_()\n\n def confirm_change_phone(self, phone):\n if len(phone) == 0:\n QMessageBox.critical(self, \"饱了没\", \"电话不能为空!\")\n else:\n pattern = re.compile(r\"^\\+?\\d+(-\\d+)*$\")\n if pattern.match(phone):\n sql = \"select count(*) from users where `telephone` = '%s'\" % phone\n self.cursor.execute(sql)\n if self.cursor.fetchone()[0] > 0:\n QMessageBox.critical(self, \"饱了没\", \"电话已经被注册!\")\n else:\n sql = \"update users set `telephone` = '%s' where `username` = '%s'\" % (phone, self.info[0])\n try:\n self.cursor.execute(sql)\n self.connect.commit()\n self.info[3] = phone\n self.update()\n self.dialog.close()\n except:\n QMessageBox.critical(self, \"饱了没\", \"修改失败!\\n请检查数据库连接!\")\n else:\n QMessageBox.critical(self, \"饱了没\", \"电话格式错误!\")\n\n def change_address_dialog(self):\n self.dialog = QDialog()\n self.dialog.setFixedSize(325, 160)\n self.dialog.setWindowIcon(QIcon(\"./image/icon.jpg\"))\n\n pos_x = QComboBox(self.dialog)\n pos_x.addItems([str(i) for i in range(1, 1000)])\n pos_x.setFont(QFont('宋体', 12))\n pos_x.resize(100, 50)\n pos_x.move(25, 20)\n\n pos_y = QComboBox(self.dialog)\n pos_y.addItems([str(i) for i in range(1, 1000)])\n pos_y.setFont(QFont('宋体', 12))\n pos_y.resize(100, 50)\n pos_y.move(200, 20)\n\n confirm = QPushButton(\"修改地址\", self.dialog)\n confirm.move(25, 90)\n confirm.resize(275, 50)\n confirm.setFont(QFont('幼圆', 12))\n confirm.setStyleSheet(\"background-color: rgb(51, 153, 255)\")\n confirm.clicked.connect(lambda: self.confirm_change_address(int(pos_x.currentText()), int(pos_y.currentText())))\n\n self.dialog.setWindowTitle(\"迁址\")\n self.dialog.setWindowModality(Qt.ApplicationModal)\n self.dialog.exec_()\n\n def confirm_change_address(self, x, y):\n sql1 = \"update `rider` set `longitude` = %d where `username` = '%s'\" % (x, self.info[0])\n sql2 = \"update `rider` set `latitude` = %d where `username` = '%s'\" % (y, self.info[0])\n try:\n self.cursor.execute(sql1)\n self.cursor.execute(sql2)\n self.connect.commit()\n self.info[10] = x\n self.info[11] = y\n self.update()\n self.dialog.close()\n except:\n QMessageBox.critical(self, \"饱了没\", \"修改失败!\\n请检查数据库连接!\")\n\n def upgrade_dialog(self):\n self.dialog = QDialog()\n self.dialog.setFixedSize(500, 630)\n self.dialog.setWindowIcon(QIcon(\"./image/icon.jpg\"))\n\n QR_code = QLabel(self.dialog)\n QR_code.setPixmap(QPixmap(\"./image/qrcode_diary.png\"))\n QR_code.setScaledContents(True)\n QR_code.resize(450, 450)\n QR_code.move(25, 25)\n\n input_password = QLineEdit(self.dialog)\n input_password.setFont(QFont(\"宋体\", 12))\n input_password.resize(450, 50)\n input_password.setPlaceholderText(\"密钥:扫一扫输入网页链接\")\n input_password.move(25, 490)\n input_password.setMaxLength(100)\n\n confirm = QPushButton(\"确认升级\", self.dialog)\n confirm.move(25, 560)\n confirm.resize(450, 50)\n confirm.setFont(QFont('幼圆', 12))\n confirm.setStyleSheet(\"background-color: rgb(51, 153, 255)\")\n confirm.clicked.connect(lambda: self.confirm_upgrade(input_password.text()))\n\n self.dialog.setWindowTitle(\"升级等级\")\n 
self.dialog.setWindowModality(Qt.ApplicationModal)\n self.dialog.exec_()\n\n def confirm_upgrade(self, password):\n if password == \"http://kuroko.info/areyourfull-diary/\" \\\n or password == \"http://kuroko.info/areyourfull-diary\" \\\n or password == \"kuroko.info/areyourfull-diary/\" \\\n or password == \"kuroko.info/areyourfull-diary\":\n sql = \"update `rider` set `rider_level` = 'expert' where `username` = '%s'\" % self.info[0]\n try:\n self.cursor.execute(sql)\n self.connect.commit()\n self.info[7] = \"expert\"\n self.update()\n self.dialog.close()\n except:\n QMessageBox.critical(self, \"饱了没\", \"升级失败!\\n请检查数据库连接!\")\n else:\n QMessageBox.critical(self, \"饱了没\", \"密码错误!\\n请检查密钥内容!\")\n\n def quit_to_login(self):\n self.to_login.emit()\n\n def show_interface(self, index):\n self.INDEX = index\n self.info_refresh()\n self.update()\n self.main_button.setVisible(True)\n self.order_button.setVisible(True)\n self.personal_button.setVisible(True)\n for i in range(len(self.all_items)):\n for j in range(len(self.all_items[i])):\n self.all_items[i][j].setVisible(False)\n\n for j in range(len(self.all_items[index])):\n self.all_items[index][j].setVisible(True)\n\n def info_refresh(self):\n self.connect = util.sql_connect()\n self.cursor = self.connect.cursor()\n sql = \"select * from `users` where username = '%s'\" % self.username\n self.cursor.execute(sql)\n self.info = list(self.cursor.fetchone())\n sql = \"select * from `rider` where username = '%s'\" % self.username\n self.cursor.execute(sql)\n self.info += list(self.cursor.fetchone())[1:]\n sql = \"select * from `order` where `rider_name` = '%s'\" % self.username\n self.cursor.execute(sql)\n self.all_orders = self.cursor.fetchall()\n\n def menu_choose(self, index):\n if index == 1 and self.sum_order == 0:\n QMessageBox.critical(self, \"饱了没\", \"暂无订单!\")\n return\n self.show_interface(index)\n tmp1 = [self.main_button, self.order_button, self.personal_button]\n for e in tmp1: e.setStyleSheet(\"background-color: rgb(232, 232, 232)\")\n tmp1[index].setStyleSheet(\"background-color: rgb(51, 153, 255)\")\n\n if index == self.ORDER:\n self.display_order(0)\n\n def func_request_button(self):\n self.info_refresh()\n sql = \"select `order_id`,`order_time`,`customer_name`,`order`.merchant_name,`state`\" \\\n \"from `order` inner join `contract` \" \\\n \"on `order`.merchant_name = `contract`.merchant_name \" \\\n \"where `contract`.rider_name = '%s'\" \\\n \"and (`state` = 1 or `state` = 2) \" % self.info[0]\n self.cursor.execute(sql)\n all_requests = self.cursor.fetchall()\n print(all_requests)\n if len(all_requests) == 0:\n QMessageBox.about(self, \"饱了没\", \"暂无订单请求!\")\n return\n self.dialog = QDialog()\n self.dialog.setFixedSize(1400, 1400)\n self.dialog.setWindowIcon(QIcon(\"./image/icon.jpg\"))\n self.dialog.setWindowTitle(\"可接订单\")\n self.dialog.setWindowModality(Qt.ApplicationModal)\n self.dialog.setPalette(self.login_palette)\n\n request_list = QScrollArea(self.dialog)\n request_list.move(0, 0)\n request_list.resize(1400, 1400)\n items = QWidget()\n vLayout = QVBoxLayout(items)\n for request in all_requests:\n vLayout.addWidget(self.create_request_item(request))\n request_list.setWidget(items)\n self.dialog.exec_()\n\n def create_request_item(self, request):\n groupBox = QGroupBox(self)\n sql = \"select * from customer where `username` = '%s'\" % request[2]\n self.cursor.execute(sql)\n cust_info = self.cursor.fetchone()\n sql = \"select * from merchant where `username` = '%s'\" % request[3]\n self.cursor.execute(sql)\n merchant_info = 
self.cursor.fetchone()\n\n pic = QLabel(self)\n pic.setPixmap(QPixmap(cust_info[4]))\n pic.setScaledContents(True)\n pic.resize(50, 50)\n pic.move(0, 0)\n\n name = QLabel(self)\n name.setText(\"下单用户: \"+cust_info[0])\n\n mname = QLabel(self)\n mname.setText(\"店铺名称: \" + merchant_info[5])\n\n order_time = QLabel(self)\n order_time.setText(\"下单时间: \"+request[1].strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n address = QLabel(self)\n address.setText(\"店铺地址:(%d,%d) 送达地址:(%d,%d) \" % (merchant_info[6], merchant_info[7], cust_info[7], cust_info[8]))\n\n order_id = QLabel(self)\n order_id.setText(\"订单号:%d\" % request[0])\n\n text_layout = QVBoxLayout()\n text_layout.addWidget(order_id)\n text_layout.addWidget(name)\n text_layout.addWidget(mname)\n text_layout.addWidget(order_time)\n text_layout.addWidget(address)\n text_widget = QWidget()\n text_widget.setLayout(text_layout)\n\n accept_button = QPushButton(self)\n if request[4] == 1:\n accept_button.setText(\"配送\")\n accept_button.setStyleSheet(\"background-color: rgb(51, 153, 255)\")\n elif request[4] == 2:\n accept_button.setText(\"送达\")\n accept_button.setStyleSheet(\"background-color: rgb(238, 44, 44)\")\n accept_button.clicked.connect(lambda: self.accept_order(accept_button, request[0], request[4], cust_info[0], merchant_info[0]))\n\n button_layout = QVBoxLayout()\n button_layout.addWidget(accept_button)\n button_widget = QWidget()\n button_widget.setLayout(button_layout)\n\n main_layout = QHBoxLayout()\n main_layout.addWidget(pic)\n main_layout.addWidget(text_widget)\n main_layout.addWidget(button_widget)\n groupBox.setLayout(main_layout)\n return groupBox\n\n def accept_order(self, btn, order_id, state, cust_name, merchant_name):\n if state == 1:\n try:\n sql = \"update `order` set `rider_name` = '%s' where `order_id` = %d\" % (self.info[0], order_id)\n sql1 = \"update `order` set `state` = 2 where `order_id` = %d\" % order_id\n sql2 = \"update `order` set `rider_accept_time` = '%s' where `order_id` = %d\" % (util.get_time(), order_id)\n self.cursor.execute(sql)\n self.cursor.execute(sql1)\n self.cursor.execute(sql2)\n self.connect.commit()\n btn.setStyleSheet(\"background-color: rgb(232, 232, 232)\")\n btn.setText(\"已接受\")\n except:\n self.connect.rollback()\n QMessageBox.critical(self, \"饱了没\", \"接受失败!\\n请检查数据库连接!\")\n elif state == 2:\n try:\n sql = \"select sumMoney(%d)\" % order_id\n self.cursor.execute(sql)\n money = self.cursor.fetchone()[0]\n sql = [\"\" for i in range(7)]\n sql[0] = \"update `order` set `state` = 3 where `order_id` = %d\" % order_id\n sql[1] = \"update `order` set `finish_time` = '%s' where `order_id` = %d\" % (util.get_time(), order_id)\n sql[2] = \"update `customer` set `sum_order` = `sum_order`+1 where `username` = '%s'\" % cust_name\n sql[3] = \"update `merchant` set `sum_order` = `sum_order`+1 where `username` = '%s'\" % merchant_name\n sql[4] = \"update `merchant` set `income` = `income`+%f where `username` = '%s'\" %(money * 0.9, merchant_name)\n sql[5] = \"update `rider` set `sum_order` = `sum_order`+1 where `username` = '%s'\" % self.info[0]\n sql[6] = \"update `rider` set `income` = `income`+%f where `username` = '%s'\" % (money * 0.1, self.info[0])\n for i in range(7): self.cursor.execute(sql[i])\n self.connect.commit()\n btn.setStyleSheet(\"background-color: rgb(232, 232, 232)\")\n btn.setText(\"已送达\")\n except:\n self.connect.rollback()\n QMessageBox.critical(self, \"饱了没\", \"接受失败!\\n请检查数据库连接!\")\n\n\n def func_rank_button(self):\n self.info_refresh()\n sql = \"select * from `rider` order by `sum_order` desc\"\n 
self.cursor.execute(sql)\n all_riders = self.cursor.fetchall()\n\n self.dialog = QDialog()\n self.dialog.setFixedSize(1200, 1200)\n self.dialog.setWindowIcon(QIcon(\"./image/icon.jpg\"))\n self.dialog.setWindowTitle(\"骑手排行\")\n self.dialog.setWindowModality(Qt.ApplicationModal)\n self.dialog.setPalette(self.login_palette)\n\n dishes_list = QScrollArea(self.dialog)\n dishes_list.move(0, 0)\n dishes_list.resize(1200, 1200)\n items = QWidget()\n vLayout = QVBoxLayout(items)\n rank = 1\n for rider in all_riders:\n vLayout.addWidget(self.create_rank_item(rider, rank))\n rank += 1\n dishes_list.setWidget(items)\n self.dialog.exec_()\n\n def create_rank_item(self, rider, rank):\n groupBox = QGroupBox(self)\n pic = QLabel(self)\n pic.setPixmap(QPixmap(rider[2]))\n pic.setScaledContents(True)\n pic.resize(150, 150)\n pic.move(0, 0)\n\n name = QLabel(self)\n tmp = rider[0]\n length = 28\n if(len(tmp)) > length: tmp = tmp[:length]\n while(len(tmp)) < length: tmp += \" \"\n name.setText(tmp)\n rank_label = QLabel(\"排名: %d\" % rank, self)\n sum_order = QLabel(\"总单数:\"+str(rider[4]), self)\n score = QLabel(self)\n if rider[8] == 0: score.setText(\"评分: 0.0\")\n else: score.setText(\"评分: %.1f\" % (rider[5]/rider[8]))\n\n\n text_layout = QVBoxLayout()\n text_layout.addWidget(name)\n text_layout.addWidget(rank_label)\n text_layout.addWidget(sum_order)\n text_layout.addWidget(score)\n text_widget = QWidget()\n text_widget.setLayout(text_layout)\n\n main_layout = QHBoxLayout()\n main_layout.addWidget(pic)\n main_layout.addWidget(text_widget)\n groupBox.setLayout(main_layout)\n return groupBox\n\n def delete_dish(self, btn, dish_id):\n try:\n sql = \"delete from `product` where `product_id` = %d\" % dish_id\n self.cursor.execute(sql)\n self.connect.commit()\n btn.setText(\"已删除\")\n except:\n self.connect.rollback()\n QMessageBox.critical(self, \"饱了没\", \"删除失败!\\n请检查数据库连接!\")\n\n def change_dish_name(self, btn, dish_id):\n self.dialog = QDialog()\n self.dialog.setFixedSize(500, 160)\n self.dialog.setWindowIcon(QIcon(\"./image/icon.jpg\"))\n\n input_name = QLineEdit(self.dialog)\n input_name.setFont(QFont(\"宋体\", 12))\n input_name.resize(450, 50)\n input_name.setPlaceholderText(\"输入新名称\")\n input_name.move(25, 20)\n input_name.setMaxLength(50)\n\n confirm = QPushButton(\"确认修改\", self.dialog)\n confirm.move(25, 90)\n confirm.resize(450, 50)\n confirm.setFont(QFont('幼圆', 12))\n confirm.setStyleSheet(\"background-color: rgb(51, 153, 255)\")\n confirm.clicked.connect(\n lambda: self.confirm_change_dish_name(input_name.text(), dish_id, btn))\n\n self.dialog.setWindowTitle(\"修改菜品名称\")\n self.dialog.setWindowModality(Qt.ApplicationModal)\n self.dialog.exec_()\n\n def confirm_change_dish_name(self, name, dish_id, btn):\n if len(name) == 0:\n QMessageBox.critical(self, \"饱了没\", \"名称不能为空!\")\n else:\n try:\n sql = \"update `product` set `name` = '%s' where `product_id` = %s\" % (name, dish_id)\n self.cursor.execute(sql)\n self.connect.commit()\n length = 28\n if (len(name)) > length: name = name[:length]\n while (len(name)) < length: name += \" \"\n btn.setText(name)\n self.dialog.close()\n except:\n QMessageBox.critical(self, \"饱了没\", \"修改失败!\\n请检查数据库连接!\")\n\n def change_dish_price(self, btn, dish_id):\n self.dialog = QDialog()\n self.dialog.setFixedSize(500, 160)\n self.dialog.setWindowIcon(QIcon(\"./image/icon.jpg\"))\n\n input_price = QLineEdit(self.dialog)\n input_price.setFont(QFont(\"宋体\", 12))\n input_price.resize(450, 50)\n input_price.setPlaceholderText(\"输入新价格\")\n input_price.move(25, 20)\n 
input_price.setMaxLength(50)\n\n confirm = QPushButton(\"确认修改\", self.dialog)\n confirm.move(25, 90)\n confirm.resize(450, 50)\n confirm.setFont(QFont('幼圆', 12))\n confirm.setStyleSheet(\"background-color: rgb(51, 153, 255)\")\n confirm.clicked.connect(\n lambda: self.confirm_change_dish_price(input_price.text(), dish_id, btn))\n\n self.dialog.setWindowTitle(\"修改菜品价格\")\n self.dialog.setWindowModality(Qt.ApplicationModal)\n self.dialog.exec_()\n\n def confirm_change_dish_price(self, price, dish_id, btn):\n try:\n price = float(price)\n if price <= 0:\n QMessageBox.critical(self, \"饱了没\", \"价格必须是正数!\")\n return\n except:\n QMessageBox.critical(self, \"饱了没\", \"输入格式有误\")\n return\n try:\n sql = \"update `product` set `price` = '%s' where `product_id` = %s\" % (price, dish_id)\n self.cursor.execute(sql)\n self.connect.commit()\n btn.setText(\"价格:\" + str(price) + \"元\")\n self.dialog.close()\n except:\n QMessageBox.critical(self, \"饱了没\", \"修改失败!\\n请检查数据库连接!\")\n\n\n def change_dish_num(self, btn, dish_id):\n self.dialog = QDialog()\n self.dialog.setFixedSize(500, 160)\n self.dialog.setWindowIcon(QIcon(\"./image/icon.jpg\"))\n\n input_num = QLineEdit(self.dialog)\n input_num.setFont(QFont(\"宋体\", 12))\n input_num.resize(450, 50)\n input_num.setPlaceholderText(\"输入库存\")\n input_num.move(25, 20)\n input_num.setMaxLength(50)\n\n confirm = QPushButton(\"确认修改\", self.dialog)\n confirm.move(25, 90)\n confirm.resize(450, 50)\n confirm.setFont(QFont('幼圆', 12))\n confirm.setStyleSheet(\"background-color: rgb(51, 153, 255)\")\n confirm.clicked.connect(\n lambda: self.confirm_change_dish_num(input_num.text(), dish_id, btn))\n\n self.dialog.setWindowTitle(\"修改菜品库存\")\n self.dialog.setWindowModality(Qt.ApplicationModal)\n self.dialog.exec_()\n\n def confirm_change_dish_num(self, num, dish_id, btn):\n try:\n num = int(num)\n if num < 0:\n QMessageBox.critical(self, \"饱了没\", \"库存不能是负数!\")\n return\n except:\n QMessageBox.critical(self, \"饱了没\", \"输入格式有误\")\n return\n try:\n sql = \"update `product` set `left` = '%s' where `product_id` = %s\" % (num, dish_id)\n self.cursor.execute(sql)\n self.connect.commit()\n btn.setText(\"剩余:\" + str(num))\n self.dialog.close()\n except:\n QMessageBox.critical(self, \"饱了没\", \"修改失败!\\n请检查数据库连接!\")\n\n def func_evaluate_button(self):\n self.info_refresh()\n sql = \"select `order_id`,`customer_name`,`score`,`comment` \" \\\n \"from `order` inner join `evaluate` \" \\\n \"on `order`.`rider_evaluate_id` = `evaluate`.`evaluate_id` \" \\\n \"where `order`.`rider_name` = '%s'\" % self.info[0]\n self.cursor.execute(sql)\n all_evaluate = self.cursor.fetchall()\n if len(all_evaluate) == 0:\n QMessageBox.about(self, \"饱了没\", \"暂无评价!\")\n return\n self.dialog = QDialog()\n self.dialog.setFixedSize(1200, 1200)\n self.dialog.setWindowIcon(QIcon(\"./image/icon.jpg\"))\n self.dialog.setWindowTitle(\"我的评价\")\n self.dialog.setWindowModality(Qt.ApplicationModal)\n self.dialog.setPalette(self.login_palette)\n\n request_list = QScrollArea(self.dialog)\n request_list.move(0, 0)\n request_list.resize(1200, 1200)\n items = QWidget()\n vLayout = QVBoxLayout(items)\n for evaluate in all_evaluate:\n vLayout.addWidget(self.create_evaluate_item(evaluate))\n request_list.setWidget(items)\n self.dialog.exec_()\n\n def create_evaluate_item(self, evaluate):\n groupBox = QGroupBox(self)\n sql = \"select `portrait` from customer where `username` = '%s'\" % evaluate[1]\n self.cursor.execute(sql)\n path = self.cursor.fetchone()[0]\n pic = QLabel(self)\n pic.setPixmap(QPixmap(path))\n 
pic.setScaledContents(True)\n pic.resize(50, 50)\n pic.move(0, 0)\n\n name = QLabel(self)\n name.setText(\"评价用户: \" + evaluate[1])\n\n score = QLabel(self)\n score.setText(\"评分: \" + evaluate[2] * \"★\")\n\n comment = QLabel(self)\n comment.setText(\"评价内容: \" + evaluate[3])\n\n order_id = QLabel(self)\n order_id.setText(\"订单号:%d\" % evaluate[0])\n\n text_layout = QVBoxLayout()\n text_layout.addWidget(order_id)\n text_layout.addWidget(name)\n text_layout.addWidget(score)\n text_layout.addWidget(comment)\n text_widget = QWidget()\n text_widget.setLayout(text_layout)\n\n main_layout = QHBoxLayout()\n main_layout.addWidget(pic)\n main_layout.addWidget(text_widget)\n groupBox.setLayout(main_layout)\n return groupBox\n\n def pull_apply(self):\n self.dialog = QDialog()\n self.dialog.setFixedSize(500, 160)\n self.dialog.setWindowIcon(QIcon(\"./image/icon.jpg\"))\n\n input_name = QLineEdit(self.dialog)\n input_name.setFont(QFont(\"宋体\", 12))\n input_name.resize(450, 50)\n input_name.setPlaceholderText(\"输入商家ID\")\n input_name.move(25, 20)\n input_name.setMaxLength(50)\n\n confirm = QPushButton(\"发送申请\", self.dialog)\n confirm.move(25, 90)\n confirm.resize(450, 50)\n confirm.setFont(QFont('幼圆', 12))\n confirm.setStyleSheet(\"background-color: rgb(51, 153, 255)\")\n confirm.clicked.connect(\n lambda: self.confirm_pull_apply(input_name.text()))\n\n self.dialog.setWindowTitle(\"申请签约\")\n self.dialog.setWindowModality(Qt.ApplicationModal)\n self.dialog.exec_()\n\n def confirm_pull_apply(self, name):\n if len(name) == 0:\n QMessageBox.critical(self, \"饱了没\", \"名称不能为空!\")\n else:\n sql = \"select count(*) from `users` where `username` = '%s' and `user_type` = 'merchant'\" % name\n self.cursor.execute(sql)\n if self.cursor.fetchone()[0] == 0:\n QMessageBox.critical(self, \"饱了没\", \"该商家不存在!\")\n else:\n sql = \"select count(*) from `apply` where `rider_name` = '%s' \" \\\n \"and `merchant_name` = '%s'\" % (self.info[0], name)\n self.cursor.execute(sql)\n if self.cursor.fetchone()[0] != 0:\n QMessageBox.critical(self, \"饱了没\", \"申请已存在!\\n请等待商家处理!\")\n return\n sql = \"select count(*) from `contract` where `rider_name` = '%s' \" \\\n \"and `merchant_name` = '%s'\" % (self.info[0], name)\n self.cursor.execute(sql)\n if self.cursor.fetchone()[0] != 0:\n QMessageBox.critical(self, \"饱了没\", \"契约已存在!\\n请勿重复操作\")\n return\n try:\n sql = \"insert into `apply` values('%s','%s', 1)\" % (self.info[0], name)\n self.cursor.execute(sql)\n self.connect.commit()\n QMessageBox.about(self, \"饱了没\", \"发送成功!\")\n self.dialog.close()\n except:\n QMessageBox.critical(self, \"饱了没\", \"发送失败!\\n请检查数据库连接!\")\n\n def func_contract_button(self):\n self.info_refresh()\n sql = \"select * from `apply` where `rider_name` = '%s' and `direction` = 0\" % self.info[0]\n self.cursor.execute(sql)\n all_apply = self.cursor.fetchall()\n self.dialog = QDialog()\n self.dialog.setFixedSize(1400, 1400)\n self.dialog.setWindowIcon(QIcon(\"./image/icon.jpg\"))\n self.dialog.setWindowTitle(\"签约商家\")\n self.dialog.setWindowModality(Qt.ApplicationModal)\n self.dialog.setPalette(self.login_palette)\n\n launch = QPushButton(\"发出申请\", self.dialog)\n launch.resize(180, 50)\n launch.setFont(QFont(\"宋体\", 12))\n launch.setStyleSheet(\"background-color: rgb(51, 153, 255)\")\n launch.move(50, 20)\n launch.clicked.connect(self.pull_apply)\n\n request_list = QScrollArea(self.dialog)\n request_list.move(0, 100)\n request_list.resize(1400, 1300)\n items = QWidget()\n vLayout = QVBoxLayout(items)\n for apply in all_apply:\n 
vLayout.addWidget(self.create_apply_item(apply))\n request_list.setWidget(items)\n self.dialog.exec_()\n\n def create_apply_item(self, apply):\n groupBox = QGroupBox(self)\n sql = \"select * from `merchant` where `username` = '%s'\" % apply[1]\n self.cursor.execute(sql)\n merchant_info = self.cursor.fetchone()\n pic = QLabel(self)\n pic.setPixmap(QPixmap(merchant_info[10]))\n pic.setScaledContents(True)\n pic.resize(50, 50)\n pic.move(0, 0)\n\n name = QLabel(self)\n name.setText(\"申请商家: \" + merchant_info[0])\n sum_order = QLabel(self)\n sum_order.setText(\"商家总单数: \" + str(merchant_info[3]))\n\n score = QLabel(self)\n if merchant_info[12] == 0:\n score.setText(\"商家评分: 0.0\")\n else:\n score.setText(\"商家评分: %.1f\" % (merchant_info[4] / merchant_info[12]))\n address = QLabel(self)\n address.setText(\"商家地址: (%d, %d)\" % (merchant_info[6], merchant_info[7]))\n\n text_layout = QVBoxLayout()\n text_layout.addWidget(name)\n text_layout.addWidget(sum_order)\n text_layout.addWidget(score)\n text_layout.addWidget(address)\n text_widget = QWidget()\n text_widget.setLayout(text_layout)\n\n accept_button = QPushButton(\"接受\", self)\n accept_button.setStyleSheet(\"background-color: rgb(51, 153, 255)\")\n accept_button.clicked.connect(lambda: self.accept_contract(accept_button, merchant_info[0]))\n\n button_layout = QVBoxLayout()\n button_layout.addWidget(accept_button)\n button_widget = QWidget()\n button_widget.setLayout(button_layout)\n\n main_layout = QHBoxLayout()\n main_layout.addWidget(pic)\n main_layout.addWidget(text_widget)\n main_layout.addWidget(button_widget)\n groupBox.setLayout(main_layout)\n return groupBox\n\n def accept_contract(self, btn, name):\n try:\n sql1 = \"delete from `apply` \" \\\n \"where `rider_name` = '%s'\" \\\n \"and `merchant_name` = '%s'\" % (self.info[0], name)\n sql2 = \"insert into `contract` values('%s','%s')\" % (self.info[0], name)\n self.cursor.execute(sql1)\n self.cursor.execute(sql2)\n self.connect.commit()\n btn.setText(\"已接受\")\n btn.setStyleSheet(\"background-color: rgb(232, 232, 232)\")\n except:\n self.connect.rollback()\n QMessageBox.critical(self, \"饱了没\", \"接受失败!\\n请检查数据库连接!\")\n\n\n def change_password_dialog(self):\n self.dialog = QDialog()\n self.dialog.setFixedSize(500, 360)\n self.dialog.setWindowIcon(QIcon(\"./image/icon.jpg\"))\n\n origin_password = QLineEdit(self.dialog)\n origin_password.setFont(QFont(\"宋体\", 12))\n origin_password.resize(450, 50)\n origin_password.setPlaceholderText(\"输入原密码\")\n origin_password.move(25, 20)\n origin_password.setEchoMode(QLineEdit.Password)\n origin_password.setMaxLength(18)\n\n new_password = QLineEdit(self.dialog)\n new_password.setFont(QFont(\"宋体\", 12))\n new_password.resize(450, 50)\n new_password.setPlaceholderText(\"输入新密码\")\n new_password.move(25, 100)\n new_password.setEchoMode(QLineEdit.Password)\n new_password.setMaxLength(18)\n\n confirm_password = QLineEdit(self.dialog)\n confirm_password.setFont(QFont(\"宋体\", 12))\n confirm_password.resize(450, 50)\n confirm_password.setPlaceholderText(\"确认新密码\")\n confirm_password.move(25, 180)\n confirm_password.setEchoMode(QLineEdit.Password)\n confirm_password.setMaxLength(18)\n\n confirm = QPushButton(\"修改密码\", self.dialog)\n confirm.move(25, 260)\n confirm.resize(450, 50)\n confirm.setFont(QFont('幼圆', 12))\n confirm.setStyleSheet(\"background-color: rgb(51, 153, 255)\")\n confirm.clicked.connect(\n lambda: self.confirm_change_password([origin_password.text(), new_password.text(), confirm_password.text()]))\n\n self.dialog.setWindowTitle(\"修改密码\")\n 
self.dialog.setWindowModality(Qt.ApplicationModal)\n self.dialog.exec_()\n\n def confirm_change_password(self, passwords):\n if passwords[0] != self.info[1]:\n QMessageBox.critical(self, \"饱了没\", \"原密码错误!\")\n elif len(passwords[1]) < 6:\n QMessageBox.critical(self, \"饱了没\", \"密码长度过短!\")\n elif passwords[1] != passwords[2]:\n QMessageBox.critical(self, \"饱了没\", \"两次输入密码不一致!\")\n else:\n sql = \"update users set `password` = '%s' where `username` = '%s'\" % (passwords[1], self.info[0])\n try:\n self.cursor.execute(sql)\n self.connect.commit()\n self.info[1] = passwords[1]\n self.dialog.close()\n except:\n QMessageBox.critical(self, \"饱了没\", \"修改失败!\\n请检查数据库连接!\")\n\n def change_order(self, var):\n self.order_index += var\n self.display_order(self.order_index)\n\n def display_order(self, index):\n self.order_index = index\n self.show_order.setText(str(self.order_index + 1))\n if index == 0:\n self.pre_order.setVisible(False)\n else:\n self.pre_order.setVisible(True)\n\n if index == self.sum_order - 1:\n self.next_order.setVisible(False)\n else:\n self.next_order.setVisible(True)\n sql = \"select * from `sell` where `order_id` = %d\" % self.all_orders[self.order_index][0]\n self.cursor.execute(sql)\n self.all_product = self.cursor.fetchall()\n items = QWidget()\n vLayout = QVBoxLayout(items)\n for product in self.all_product:\n vLayout.addWidget(self.create_product_item_2(product))\n self.products.setWidget(items)\n self.update()\n\n def create_product_item_2(self, product):\n groupBox = QGroupBox(self)\n num = product[2]\n sql = \"select * from `product` where `product_id` = %d\" % product[1]\n self.cursor.execute(sql)\n product = self.cursor.fetchone()\n price = num*product[1]\n pic = QLabel(self)\n pic.setPixmap(QPixmap(product[3]))\n pic.setScaledContents(True)\n pic.resize(150, 150)\n pic.move(0, 0)\n\n name = QLabel(self)\n tmp = product[4]\n length = 25\n if(len(tmp)) > length: tmp = tmp[:length]\n while(len(tmp)) < length: tmp += \" \"\n name.setText(tmp)\n num_label = QLabel(\"x %d\" % num, self)\n price_label = QLabel(\"¥%.2f\" % price, self)\n\n text_layout = QVBoxLayout()\n text_layout.addWidget(name)\n text_layout.addWidget(num_label)\n text_widget = QWidget()\n text_widget.setLayout(text_layout)\n\n price_layout = QVBoxLayout()\n price_layout.addWidget(price_label)\n price_widget = QWidget()\n price_widget.setLayout(price_layout)\n\n main_layout = QHBoxLayout()\n main_layout.addWidget(pic)\n main_layout.addWidget(text_widget)\n main_layout.addWidget(price_widget)\n groupBox.setLayout(main_layout)\n return groupBox\n\n def paintEvent(self, event):\n qp = QPainter()\n qp.begin(self)\n if self.INDEX == self.ORDER:\n self.paint_order(event, qp)\n elif self.INDEX == self.PERSONAL:\n self.paint_personal(event, qp)\n qp.end()\n\n def val(self, sql):\n self.cursor.execute(sql)\n return self.cursor.fetchone()[0]\n\n def paint_order(self, event, qp):\n item = self.all_orders[self.order_index]\n qp.setPen(QColor(0, 0, 0))\n qp.setFont(QFont(\"宋体\", 12))\n qp.drawText(20, 250, \"订单号: \"+str(item[0]))\n tmp = [\"已下单\", \"商家已接单\", \"骑手正在配送\", \"已送达\"]\n qp.drawText(20, 300, \"订单状态: \" + tmp[item[5]])\n qp.drawText(20, 350, \"下单用户: \" + item[6])\n sql = \"select `name` from merchant where `username` = '%s'\" % item[8]\n qp.drawText(20, 400, \"商家名称: \" + self.val(sql))\n if item[7] is not None: qp.drawText(20, 450, \"配送骑手: \" + item[7])\n else: qp.drawText(20, 450, \"配送骑手: 暂无\")\n if item[5] == 3: qp.drawText(400, 300, \"送达时间: \"+str(item[4]))\n qp.drawText(400, 350, \"下单时间: \"+str(item[1]))\n 
tmp = \"暂无\"\n if item[2] is not None: tmp = str(item[2])\n qp.drawText(400, 400, \"接单时间: \"+tmp)\n tmp = \"暂无\"\n if item[3] is not None: tmp = str(item[3])\n qp.drawText(400, 450, \"配送时间: \"+tmp)\n sql = \"select sumMoney(%d)\" % item[0]\n qp.drawText(20, 500, \"订单金额: \" + str(self.val(sql)))\n tmp = 0\n for e in self.all_product: tmp += e[2]\n qp.drawText(400, 500, \"商品总数: \" + str(tmp))\n\n def paint_personal(self, event, qp):\n qp.setPen(QColor(0, 0, 0))\n qp.setFont(QFont(\"宋体\", 12))\n qp.drawText(350, 250, \"ID:\" + self.info[0])\n d = {\"common\": \"初级\", \"medium\": \"中级\", \"expert\": \"高级\"}\n qp.drawText(350, 340, \"等级:\"+d[self.info[7]])\n qp.drawText(350, 435, \"注册:\" + self.info[5].strftime('%Y-%m-%d'))\n qp.drawText(70, 540, \"邮箱: \" + self.info[4])\n qp.drawText(70, 645, \"电话: \" + self.info[3])\n qp.drawText(70, 750, \"收入: \" + str(float(self.info[13])) + \"元\")\n if self.info[12] == 0: qp.drawText(70, 855, \"评分: 0.0\")\n else: qp.drawText(70, 855, \"评分: %.1f\" % (self.info[9]/self.info[12]))\n qp.drawText(70, 960, \"总订单数: \" + str(int(self.info[8])))\n qp.drawText(70, 1065, \"地址: (%d, %d)\" % (int(self.info[10]), int(self.info[11])))\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n win = Rider(\"rider\")\n win.show()\n sys.exit(app.exec_())\n pass\n","sub_path":"source_code/rider.py","file_name":"rider.py","file_ext":"py","file_size_in_byte":48519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"308087097","text":"import os\nimport pickle\nimport re\nimport numpy as np\n\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\n\ndef log_print(text, logger, log_only = False):\n if not log_only:\n print(text)\n if logger is not None:\n logger.info(text)\n\ndef get_module_name(obj):\n try:\n return re.findall(r\"[A-Za-z0-9]+'\",str(type(obj)))[0].replace(\"'\",'')\n except:\n return \"Name not found\"\n\ndef upload_google_sheets(upload_dict, spreadsheet_id = '1nliojVYnyy-42Sy-OFy3rB28dOdNakTA06oHa-Gyg9c', sheet_name = 'CV_Record', logger = None):\n SCOPES = ['https://www.googleapis.com/auth/spreadsheets']\n\n # Authentication\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('sheets', 'v4', credentials=creds)\n\n # Get Current Row Number and headings\n sheet = service.spreadsheets()\n currentRowNumber = len(sheet.values().get(spreadsheetId=spreadsheet_id, range=f'{sheet_name}!1:1000').execute()['values'])\n headings = sheet.values().get(spreadsheetId=spreadsheet_id, range=f'{sheet_name}!1:1').execute()['values'][0]\n\n upload_list = []\n for header in headings:\n if header in upload_dict.keys():\n upload_list.append(str(upload_dict[header]))\n else:\n upload_list.append('')\n\n # Upload to Google Sheets\n body = {\n 'values': 
[upload_list]\n }\n result = service.spreadsheets().values().update(\n spreadsheetId=spreadsheet_id, range=f'{sheet_name}!{currentRowNumber+1}:{currentRowNumber+1}',\n valueInputOption='USER_ENTERED', body=body).execute()\n log_print('Results Uploaded', logger)\n\ndef compute_cm_binary(y_pred, y):\n TP = np.logical_and(y_pred == 1, y == 1).sum()\n TN = np.logical_and(y_pred == 0, y == 0).sum()\n FP = np.logical_and(y_pred == 1, y == 0).sum()\n FN = np.logical_and(y_pred == 0, y == 1).sum()\n return TN, FP, FN, TP\n\ndef get_iou_score(y_pred, y, threshold):\n y_pred = np.where(y_pred > threshold, 1, 0)\n tn, fp, fn, tp = compute_cm_binary(y_pred, y)\n iou_score = (2*tp) / (2*tp + fp + fn) # note: this is the Dice/F1 formula; a strict IoU would be tp / (tp + fp + fn)\n return iou_score","sub_path":"utils/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"253501998","text":"#!/usr/bin/env python\n\nimport scapy.all as sc\nimport utility.utils as utils\nfrom Cryptodome.PublicKey import RSA\n\nclass TLSState:\n '''\n Holds the TLS connection info and creates packets\n '''\n\n\n def __init__(self, client_hello, sul):\n self.client_hello = client_hello\n self.tls_version = 'TLS_1_2'\n self.sul = sul\n self.private = RSA.generate(1024)\n self.public = self.private.publickey()\n\n def server_key(self):\n return (self.server_hello\n .records[1]\n .handshakes[0][sc.TLSCertificate]\n .data\n .tbsCertificate\n .subjectPublicKeyInfo\n .subjectPublicKey)\n\n def server_hello(self, server_hello):\n # note: this rebinds the instance attribute over the method name after the first call\n self.server_hello = server_hello\n\n def encrypt(self, message):\n return self.server_key().encrypt(message)\n\n def decrypt(self, ciphertext):\n return self.private.decrypt(ciphertext)\n\n def client_key_exchange(self):\n pac = self.sul.queries['HEADER']\n\n handshake = (sc.TLSRecord(content_type='handshake',\n version=self.tls_version) /\n sc.TLSHandshakes(handshakes=[\n sc.TLSHandshake(type='client_key_exchange') /\n sc.TLSClientKeyExchange() /\n sc.TLSClientRSAParams(data=\n utils.long2bytes(self.public.n))]))\n\n change_cipher_spec = (sc.TLSRecord(content_type='change_cipher_spec',\n version=self.tls_version) /\n sc.TLSChangeCipherSpec(message=b'\\x01'))\n\n ciphertext = (sc.TLSRecord(content_type='handshake',\n version=self.tls_version) /\n sc.TLSCiphertext(data=''))\n\n pac = pac / sc.SSL(records=[\n handshake,\n change_cipher_spec,\n ciphertext\n ])\n\n return pac\n","sub_path":"src/TLSState.py","file_name":"TLSState.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"315672820","text":"import requests # https://requests.readthedocs.io/en/master/user/quickstart/\nfrom bs4 import BeautifulSoup\ndef main():\n url = 'http://www.allocine.fr/'\n r = requests.get(url)\n print(url, r.status_code)\n soup = BeautifulSoup(r.content, 'lxml')\n soup\n for p in soup.find_all('a'): # an 'a' tag refers to hyperlinks\n print(p.text)\n for elem in soup.find_all('a', attrs={\"class\": \"meta-title meta-title-link\"}):\n print(elem)\n # print href's\n for elem in soup.find_all('a', attrs={\"class\": \"meta-title meta-title-link\"}):\n print(elem.get('href'))\n # return a list\n # print titles\n for elem in soup.find_all('a', attrs={\"class\": \"meta-title meta-title-link\"}):\n print(elem.get('title'))\n\n\nmain()","sub_path":"02 Python Advanced/Python Advanced 5.Web Scrapping/Data Web Scraping - Targeted Information Retrieval.py","file_name":"Data Web Scraping - Targeted Information 
Retrieval.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"611111668","text":"from django.conf.urls import patterns, url, include\n\n__author__ = 'jyrkila'\n\nurlpatterns = patterns('kirppu.app.views',\n url(r'^page/?$', 'get_items', name='page'),\n url(r'^clerks/?$', 'get_clerk_codes', name='clerks'),\n url(r'^commands/$', 'get_commands', name='commands'),\n url(r'^command/(?P::\\w+?)\\.(?P\\w+)$',\n 'get_barcode', name='command_barcode'),\n url(r'^barcode/(?P\\w+?)\\.(?P\\w+)$',\n 'get_barcode', name='barcode'),\n url(r'^checkout/$', 'checkout_view'),\n\n url(r'^vendor/$', 'vendor_view', name='vendor_view'),\n url(r'^vendor/item/$', 'item_add', name='item_add'),\n url(r'^vendor/item/(?P\\w+?)/$', 'item_view', name='item_delete'),\n url(r'^vendor/item/(?P\\w+?)/price$', 'item_update_price', name='item_update_price'),\n url(r'^vendor/item/(?P\\w+?)/name$', 'item_update_name', name='item_update_name'),\n url(r'^vendor/item/(?P\\w+?)/type$', 'item_update_type', name='item_update_type'),\n url(r'^vendor/logout/?$', 'logout_view', name='logout_view'),\n\n url(r'^api/', include('kirppu.app.checkout.urls')),\n)\n","sub_path":"kirppu/app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"386160327","text":"# -*- coding: utf-8 -*-\n# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 3\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>\n#\n# ##### END GPL LICENSE BLOCK #####\n\nfrom setuptools import setup, find_packages\n\n\n__author__ = \"Lothar Krause\"\n__maintainer__ = \"Sergi Blanch-Torné\"\n__email__ = \"sblanch@cells.es\"\n__copyright__ = \"Copyright 2017, CELLS / ALBA Synchrotron\"\n__license__ = \"GPLv3+\"\n\n__project__ = 'HazemeyerIocaste_PS'\n__description__ = \"Python module to control via Tango-CS the Hazemeyer \"\\\n \"power converter controller called Iocaste.\"\n__longDesc__ = \"\"\"\nThis module has been prepared to provide control using the Tango-CS of the\npower converter from the Hazemeyer manufacturer.\n\"\"\"\n__url__ = \"https://github.com/srgblnch/HazemeyerIocaste_PS\"\n# we use semantic versioning (http://semver.org/) and we update it using the\n# bumpversion script (https://github.com/peritus/bumpversion)\n__version__ = '1.29.0-alpha'\n\n\nsetup(name=__project__,\n license=__license__,\n description=__description__,\n long_description=__longDesc__,\n version=__version__,\n author=__author__,\n author_email=__email__,\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: '\n 'GNU General Public License v3 or later (GPLv3+)',\n 'Operating System :: POSIX',\n 'Programming Language :: Python',\n 'Topic :: Scientific/Engineering :: '\n 'Interface Engine/Protocol Translator',\n 'Topic :: Software Development :: Embedded Systems',\n 'Topic :: Software Development :: Libraries :: '\n 'Python Modules',\n 'Topic :: System :: Hardware',\n ],\n packages=find_packages(),\n url=__url__,\n entry_points={\n 'console_scripts':['HazemeyerIocaste_PS = HazemeyerIocaste_PS.main']},\n )\n\n# for the classifiers review see:\n# https://pypi.python.org/pypi?%3Aaction=list_classifiers\n#\n# Development Status :: 1 - Planning\n# Development Status :: 2 - Pre-Alpha\n# Development Status :: 3 - Alpha\n# Development Status :: 4 - Beta\n# Development Status :: 5 - Production/Stable\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"642040501","text":"__author__ = 'romandeles'\n# coding: utf-8\n\"\"\"\n1. create 3 separate classes (tank, car, cart)\n2. each class has a status method that prints the object's current state\n3. Create and collect some number of new objects of these classes in the list cars\n4. Perform several actions on these objects (e.g. set the audi car's speed to 90, remove the chassis from the t-34 tank)\n5. 
at the end of the program, print the states of all objects from cars\n\"\"\"\n\n\nclass Tank:\n def __init__(self, shassi=int, model=str, speed=int, gusenici=bool):\n self.shassi = shassi\n self.gusenici = gusenici\n self.model = model\n self.speed = speed\n\n def status(self):\n print(\"Shassi = {0}, gusenici is {1}, speed = {2} Km/H, model = {3}\".format(self.shassi, self.gusenici, self.speed,\n self.model))\n if self.speed > 0 and self.gusenici is True:\n print('Health check is ok')\n else:\n print('Health check is failed')\n\n\nclass Car:\n def __init__(self, speed=int, model=str, kolesa=4):\n self.speed = speed\n self.kolesa = kolesa\n self.model = model\n\n def status(self):\n print('Speed = {0} Km/H, kolesa = {1}, model is {2}'.format(self.speed, self.kolesa, self.model))\n if self.speed > 0 and self.kolesa == 4:\n print('Health check is ok')\n else:\n print('Health check is failed')\n\n\nclass Telega:\n def __init__(self, kolesa=int, speed=int):\n self.kolesa = kolesa\n self.speed = speed\n\n def status(self):\n print('Kolesa = {0} Km/H, speed = {1}'.format(self.kolesa, self.speed))\n if self.kolesa == 2 and self.speed > 0:\n print('Health check is ok')\n else:\n print('Health check is failed')\n\n\nPatton = Tank(10, 'M46 Patton 2', 50, True)\nBMW = Car(speed=30,model=\"M3\")\nTelega_1 = Telega(2, 5)\n\ncars = [Patton, BMW, Telega_1]\n\nprint('Default values are: ')\nfor i in cars:\n i.status()\n\nprint('========================================')\n\nBMW.kolesa = 3\nTelega_1.speed = 0\nPatton.gusenici = False\n\nprint('Some changes have happened')\nfor i in cars:\n i.status()","sub_path":"python_local_courses/dz-7/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"518642423","text":"from base import *\nfrom mod_side import ModSide\n\nclass MatchRequest(Base):\n __tablename__ = 'matchrequest'\n\n matchrequest_id = Column(Integer, primary_key=True)\n league_id = Column(Integer, ForeignKey('league.league_id'), nullable=True)\n match_configuration_id = Column(Integer, ForeignKey('matchconfiguration.matchconfiguration_id'), nullable=False)\n\n match_configuration = relationship(\"MatchConfiguration\")\n league = relationship(\"League\")\n matchrequestinprogress = relationship(\"MatchRequestInProgress\",\n uselist=False)\n matchresult = relationship(\"MatchResult\", uselist=False)\n\n def __init__(self, match_configuration_id, league_id = None):\n self.match_configuration_id = match_configuration_id\n self.league_id = league_id\n","sub_path":"webserver/website/springgrid/model/entity/match_request.py","file_name":"match_request.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"197523028","text":"from functools import partial\nfrom itertools import permutations, combinations_with_replacement, chain\nfrom operator import add, sub, and_, or_, invert\nfrom typing import List\nfrom flask import Flask,render_template,request\napp = Flask(__name__)\n\ndef get_permut(binstrs: List[str], targetstr: str):\n \"\"\"Find a valid postfix expression; binstrs are the binary strings we currently have, targetstr is the binary string to assemble\"\"\"\n target = int(targetstr, base=2)\n nums = list(map(partial(int, base=2), binstrs)) # compute the value of each binary string\n print(f\"现有{nums},正在寻找能够拼出{target}的方法...\")\n cals = [add, sub, and_, or_, invert] # all allowed operators\n # enumerate every possible postfix expression\n for ivt_num in range(len(nums)+1): # the inversion count goes in the outermost loop, since fewer inversions may already be enough\n for cal_comb in 
combinations_with_replacement(cals[0:4], len(nums)-1):\n            for permut in permutations(chain(nums, cal_comb, [invert]*ivt_num)):\n                stack = []\n                flag = True\n                for p in permut:\n                    if isinstance(p, int):\n                        stack.append(p)\n                    else:\n                        if len(stack) < 1:\n                            flag = False\n                            break\n                        elif len(stack) < 2:\n                            if p == invert:\n                                stack[-1] = p(stack[-1])\n                            else:\n                                flag = False\n                                break\n                        else:\n                            if p == invert:\n                                stack[-1] = p(stack[-1])\n                            else:\n                                tmp = p(stack.pop(), stack.pop())\n                                stack.append(tmp)\n                if flag and len(stack) == 1 and stack[-1] == target:\n                    return permut\n    return False\n\n\ndef permut2str(permut):\n    \"\"\"Convert a postfix expression into a normal infix expression.\"\"\"\n    calsign = {add: '+', sub: '-', and_: '&', or_: '|'}\n    bin_stack, dec_stack = [], []  # hold the numbers in binary and in decimal; both stacks get exactly the same operations\n    for p in permut:\n        if isinstance(p, int):\n            bin_stack.append(bin(p)[2:])\n            dec_stack.append(str(p))\n        else:\n            if len(bin_stack) < 1:\n                return \"\", \"\"\n            elif len(bin_stack) < 2:\n                if p == invert:\n                    bin_stack[-1] = f\"~({bin_stack[-1]})\"\n                    dec_stack[-1] = f\"~({dec_stack[-1]})\"\n                else:\n                    return \"\", \"\"\n            else:\n                if p == invert:\n                    bin_stack[-1] = f\"~({bin_stack[-1]})\"\n                    dec_stack[-1] = f\"~({dec_stack[-1]})\"\n                else:\n                    bin_stack.append(\"(\" + bin_stack.pop() + calsign[p] + bin_stack.pop() + \")\")\n                    dec_stack.append(\"(\" + dec_stack.pop() + calsign[p] + dec_stack.pop() + \")\")\n    if len(bin_stack) == 1:\n        return bin_stack[-1], dec_stack[-1]\n    else:\n        return \"\", \"\"\n\n@app.route('/')\ndef solve():\n    return render_template('index.html', ans=\"Answer area\")\n\n@app.route('/', methods=['POST', 'GET'])\ndef deal():\n    if request.method == 'POST':\n        a_num = int(request.form[\"a_num\"])\n        b_num = int(request.form[\"b_num\"])\n        c_num = int(request.form[\"c_num\"])\n        ticket_list = []\n        for i in range(a_num):\n            ticket_list.append('10011')\n        for i in range(b_num):\n            ticket_list.append('00011')\n        for i in range(c_num):\n            ticket_list.append('01011')\n        s = \"\"\n        permut = get_permut(ticket_list, '101001')\n        if permut is False:\n            s = \"No usable combination\"\n        else:\n            bin_ans, dec_ans = permut2str(permut)\n            s = \"Result: \" + bin_ans + \"\\nIn decimal: \" + dec_ans + \"\\nYour tickets: 10011: %d, 00011: %d, 01011: %d\" % (a_num, b_num, c_num)\n        return render_template('index.html', ans=s)\n\n\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', debug=False)\n","sub_path":"bin_permutation.py","file_name":"bin_permutation.py","file_ext":"py","file_size_in_byte":4186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"607565209","text":"from selenium.common.exceptions import NoSuchElementException, TimeoutException\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver import ActionChains\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.keys import Keys\r\nimport xmltodict\r\nimport logging\r\nimport ScreenOutputLogging\r\nimport ScreenControlDuringExecution\r\nimport Utils\r\nimport time\r\n\r\n\r\n# #####################################################################################################################\r\ndef s_check_boxes_multiselect_06(driver, run_speed, field_id, field_order, field_info, field_name, field_value,\r\n                                 print_log_setting=1, removal_excl_beh=\"\"\"[0, '']\"\"\", form_name='Test_Form'):\r\n    \"\"\"This takes a field data specification for a checkbox field, logs the action and enters the data\"\"\"\r\n    print(removal_excl_beh)\r\n    remove_non_matching_values = 
removal_excl_beh[0]\r\n print('remove_non_matching_values {x}'.format(x=remove_non_matching_values))\r\n excl_opt_val_w_caret = removal_excl_beh[1]\r\n print('excl_opt_val_w_caret {x}'.format(x=excl_opt_val_w_caret))\r\n logging.basicConfig(filename='example.log', filemode='w',\r\n level=logging.INFO) # write over existing log in this file\r\n ScreenOutputLogging.pretty_log_field('start', field_order, field_id, field_name, field_info,\r\n print_log_setting)\r\n ScreenOutputLogging.pretty_log_data_value(field_value, print_log_setting)\r\n # To do - cater for when remove_non_matching_values == 1 and especially when field_value == 'NULL'\r\n try:\r\n label_xpath = '''//div/b/span[.=\"''' + field_name + '''\"]'''\r\n print(label_xpath)\r\n field_label = driver.find_element_by_xpath(label_xpath)\r\n label_parent_div_xpath = '''//div[div/b/span[.=\"''' + field_name + '''\"]]'''\r\n print(label_parent_div_xpath)\r\n # test for fe visible... #\r\n if field_label.is_displayed(): # proceed to consider entering data #\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 3, label_xpath, 'label xpath')\r\n ScreenControlDuringExecution.scroll_to(driver, field_name, label_xpath)\r\n logging.info('{ord} {label} is visible'.format(ord=field_order, label=field_name))\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, field_order + ' ' + field_name,\r\n 'visible')\r\n print('label is displayed')\r\n\r\n # ----- get the page set of elements as a requirement for the work\r\n checkbox_page_list = Utils.get_page_set_elements(driver, field_name, 'checkbox', print_log_setting)\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, checkbox_page_list, 'checkbox_page_list')\r\n print('got page list')\r\n\r\n # ----- get the user set for comparison\r\n # checkbox_user_list = Utils.py_list_my_checkbox_user_options(field_value)\r\n checkbox_user_list = Utils.get_user_set_values(field_value, 'checkbox', print_log_setting)\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, checkbox_user_list, 'checkbox_user_list')\r\n print('got user list')\r\n\r\n # exclusive options (caretted values)\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 3, excl_opt_val_w_caret,\r\n 'excl_opt_val_w_caret')\r\n print(excl_opt_val_w_caret)\r\n\r\n excl_opt_set = Utils.py_list_me(excl_opt_val_w_caret)\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 3, excl_opt_set,\r\n 'excl_opt_set')\r\n print(excl_opt_set)\r\n if excl_opt_val_w_caret == '':\r\n pass\r\n else:\r\n # if an exclusive option is in the user set, deal with that first, it could be quicker\r\n opts_xpath = '''//input[@label=\"''' + field_name + '''\"]'''\r\n excl_opt_xpaths = []\r\n for excl_opt_val in excl_opt_set:\r\n excl_opt_vl = excl_opt_val.replace('^', '')\r\n print('-- 0: {x}'.format(x=excl_opt_vl))\r\n excl_opt_v = \"'\" + excl_opt_vl + \"'\"\r\n print('-- p: {x}'.format(x=excl_opt_v))\r\n excl_opt_xpath = opts_xpath + '''[@value=\"''' + excl_opt_v + '''\"]'''\r\n print('-- q: {x}'.format(x=excl_opt_xpath))\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 3, excl_opt_xpath, 'excl_opt_xpath')\r\n excl_opt_xpaths.append(excl_opt_xpath)\r\n # opt_xpath = opts_xpath + \"\"\"[@value=\"'\"\"\" + excl_opt_vl + \"\"\"'\"]\"\"\"\r\n # print('-- r: {x}'.format(x=opt_xpath))\r\n\r\n\r\n print('----------------------------')\r\n print(excl_opt_v)\r\n print(checkbox_user_list)\r\n print('----------------------------')\r\n\r\n opt_el = 
driver.find_element_by_xpath(excl_opt_xpath)\r\n if opt_el.is_selected():\r\n if excl_opt_v in checkbox_user_list:\r\n print('-- hooray: {x}'.format(x=excl_opt_v))\r\n pass\r\n else: # didn't want this one\r\n # uncheck it\r\n print('-- ah ah! tricky dude! - now just uncheck!')\r\n opt_el.click()\r\n else: # not selected\r\n if excl_opt_v in checkbox_user_list:\r\n print('-- bide our time... may not be visible')\r\n else:\r\n print('-- did not want you any how!')\r\n pass\r\n print('----------------------------')\r\n\r\n # iterate the page set and compare with the user set under the different conditions\r\n for page_option_attr_set in checkbox_page_list:\r\n page_option_el = page_option_attr_set[0]\r\n # page_option_id = page_option_attr_set[1]\r\n page_option_vl = page_option_attr_set[2]\r\n print('-- xx: {x}'.format(x=page_option_vl))\r\n if remove_non_matching_values == \"\"\"0\"\"\": # don't act on them...\r\n if not page_option_el.is_selected():\r\n # ie, not currently checked\r\n if page_option_vl in checkbox_user_list:\r\n page_option_el.click() # imposed user-wish on page by checking\r\n else: # strict unchecking # do act on clearing non-matching values...\r\n if page_option_el.is_selected():\r\n if page_option_vl not in checkbox_user_list:\r\n page_option_el.click() # must impose on page by unchecking\r\n else:\r\n # ie, not currently checked\r\n if page_option_vl in checkbox_user_list:\r\n page_option_el.click() # must impose on page by checking\r\n # end\r\n\r\n if checkbox_user_list[-2] == \"\"\"'Other'\"\"\":\r\n other_free_text_value = checkbox_user_list[-1]\r\n # establish the free text element\r\n free_text_xpath = label_parent_div_xpath + '''//td[2]//div/input'''\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 3, free_text_xpath, 'free_text_xpath')\r\n # update the free text part\r\n free_text_element = driver.find_element_by_xpath(free_text_xpath)\r\n free_text_element.clear()\r\n free_text_element.send_keys(other_free_text_value)\r\n\r\n ScreenControlDuringExecution.set_speed(driver, run_speed, field_value)\r\n folder_name = form_name[:9]\r\n picture_filename = form_name + \"_\" + field_order + \"_\" + field_name + '_' + field_id + \"_\" + field_info\r\n ScreenOutputLogging.Logging.save_screenshot_picture_stringbased(driver, folder_name, picture_filename)\r\n logging.info('{ord} - finished processing'.format(ord=field_order + \"_\" + field_id))\r\n except:\r\n raise NoSuchElementException(\"Could not find field element in time\")\r\n\r\n\r\n# #####################################################################################################################\r\ndef s_table_list_multiselect_17(driver, run_speed, field_id, field_order, field_info, field_name, field_value,\r\n print_log_setting=1, behaviour=\"\"\"\"\"\", form_name='Test_Form'):\r\n \"\"\"This takes a field data specification for a standard time field, logs the action and enters the data\"\"\"\r\n logging.basicConfig(filename='example.log', filemode='w',\r\n level=logging.INFO) # write over existing log in this file\r\n ScreenOutputLogging.pretty_log_field('start', field_order, field_id, field_name, field_info,\r\n print_log_setting)\r\n ScreenOutputLogging.pretty_log_data_value(field_value, print_log_setting)\r\n\r\n try:\r\n label_xpath = '''//div/b/span[.=\"''' + field_name + '''\"]'''\r\n field_label = driver.find_element_by_xpath(label_xpath)\r\n # test for fe visible... 
#\r\n if field_label.is_displayed(): # proceed to consider entering data #\r\n if print_log_setting >= 2:\r\n print('label xpath {xpath}'.format(xpath=label_xpath))\r\n ScreenControlDuringExecution.scroll_to(driver, field_name, label_xpath)\r\n logging.info('{ord} {label} is visible'.format(ord=field_order, label=field_name))\r\n # assign opt xpath elements... #\r\n # print(field_value)\r\n field_values_list = field_value.split(', ')\r\n try:\r\n xpath = '''//div[contains(@id,\"proxy\")][@label=\"''' + field_name + '''\"]'''\r\n # print(xpath)\r\n element_to_use = driver.find_element_by_xpath(xpath)\r\n element_to_use.click()\r\n # print('hopefully, that opened the popout...')\r\n if field_values_list[0] != 'None':\r\n for item_value in field_values_list:\r\n # print('raw item value')\r\n # print(item_value)\r\n if item_value[0] == \"'\":\r\n item_value = item_value[1:]\r\n if item_value[-1] == \"'\":\r\n item_value = item_value[:-1]\r\n # print('trimmed item value')\r\n # print(item_value)\r\n xpath = '''//td[.=\"''' + item_value + '''\"]'''\r\n # print(xpath)\r\n table_cell = driver.find_element_by_xpath(xpath)\r\n cell_attr = table_cell.get_attribute('title')\r\n if cell_attr == 'Click to select':\r\n table_cell.click()\r\n done_button = driver.find_element_by_xpath('''//button[.=\"Save & Close\"]''')\r\n done_button.click()\r\n else:\r\n xpath = '''//td[.=\"None\"]'''\r\n table_cell = driver.find_element_by_xpath(xpath)\r\n table_cell.click()\r\n except:\r\n raise NoSuchElementException(\"Could not time element or done button in time.\")\r\n ScreenControlDuringExecution.set_speed(driver, run_speed, field_value)\r\n folder_name = form_name[:9]\r\n picture_filename = form_name + \"_\" + field_order + \"_\" + field_name + '_' + field_id + \"_\" + field_info\r\n ScreenOutputLogging.Logging.save_screenshot_picture_stringbased(driver, folder_name, picture_filename)\r\n logging.info('{ord} - finished processing'.format(ord=field_order + \"_\" + field_id))\r\n except:\r\n raise NoSuchElementException(\"Could not find field element in time\")\r\n\r\n\r\n# #####################################################################################################################\r\ndef s_table_grid_row_and_column_select_46(driver, run_speed, field_id, field_order, field_info, field_name, field_value,\r\n print_log_setting=1, grid_settings=\"\"\"\"\"\", form_name='Test_Form'):\r\n \"\"\"This takes a field data specification for a standard table grid field, logs the action and enters the data\"\"\"\r\n\r\n # grid_select, grid_store, val_flag, number_columns, grid_show_header, # arguments removed when went to single\r\n # function call for all fields (Command Pattern)\r\n\r\n logging.basicConfig(filename='example.log', filemode='w',\r\n level=logging.INFO) # write over existing log in this file\r\n ScreenOutputLogging.pretty_log_field('start', field_order, field_id, field_name, field_info,\r\n print_log_setting)\r\n ScreenOutputLogging.pretty_log_data_value(field_value, print_log_setting)\r\n\r\n try:\r\n label_xpath = '''//div/b/span[.=\"''' + field_name + '''\"]'''\r\n field_label = driver.find_element_by_xpath(label_xpath)\r\n # test for fe visible... 
#\r\n if field_label.is_displayed(): # proceed to consider entering data #\r\n logging.info('{ord} {label} is visible'.format(ord=field_order, label=field_name))\r\n if print_log_setting >= 2:\r\n print('label xpath {xpath}'.format(xpath=label_xpath))\r\n ScreenControlDuringExecution.scroll_to(driver, field_name, label_xpath)\r\n # assign opt xpath elements... #\r\n # print(field_value)\r\n field_values_list = field_value.split(', ')\r\n try:\r\n div_id = field_id[1:] + 'proxy'\r\n div_path = '''//div[@id=\"''' + div_id + '''\"]'''\r\n # table_id = 'Grid' + field_id[1:]\r\n # table_path = '''//table[@id=\"''' + table_id + '''\"]'''\r\n # print(table_path)\r\n element_to_use = driver.find_element_by_xpath(div_path)\r\n element_to_use.click()\r\n # print('hopefully, that opened the popout...')\r\n for item_value in field_values_list:\r\n # print('raw item value')\r\n # print(item_value)\r\n if item_value[0] == \"'\":\r\n item_value = item_value[1:]\r\n if item_value[-1] == \"'\":\r\n item_value = item_value[:-1]\r\n # print('trimmed item value')\r\n # print(item_value)\r\n xpath = '''//td[.=\"''' + item_value + '''\"]'''\r\n # print(xpath)\r\n table_cell = driver.find_element_by_xpath(xpath)\r\n cell_attr = table_cell.get_attribute('title')\r\n if cell_attr == 'Click to select':\r\n table_cell.click()\r\n done_button = driver.find_element_by_xpath('''//button[.=\"Save & Close\"]''')\r\n done_button.click()\r\n except:\r\n raise NoSuchElementException(\"Could not time element or done button in time.\")\r\n ScreenControlDuringExecution.set_speed(driver, run_speed, field_value)\r\n folder_name = form_name[:9]\r\n picture_filename = form_name + \"_\" + field_order + \"_\" + field_name + '_' + field_id + \"_\" + field_info\r\n ScreenOutputLogging.Logging.save_screenshot_picture_stringbased(driver, folder_name, picture_filename)\r\n logging.info('{ord} - finished processing'.format(ord=field_order + \"_\" + field_id))\r\n except:\r\n raise NoSuchElementException(\"Could not find field element in time\")\r\n\r\n\r\n# #####################################################################################################################\r\ndef s_check_boxes_multiselect_zz(driver, run_speed, field_id, field_order, field_info, field_name, field_value,\r\n print_log_setting=1, excl_opt_val_w_caret=\"\"\"\"\"\", form_name='Test_Form'):\r\n \"\"\"This takes a field data specification for a checkbox field, logs the action and enters the data\"\"\"\r\n logging.basicConfig(filename='example.log', filemode='w',\r\n level=logging.INFO) # write over existing log in this file\r\n ScreenOutputLogging.pretty_log_field('start', field_order, field_id, field_name, field_info,\r\n print_log_setting)\r\n ScreenOutputLogging.pretty_log_data_value(field_value, print_log_setting)\r\n\r\n try:\r\n label_xpath = '''//div/b/span[.=\"''' + field_name + '''\"]'''\r\n field_label = driver.find_element_by_xpath(label_xpath)\r\n label_parent_div_xpath = '''//div[div/b/span[.=\"''' + field_name + '''\"]]'''\r\n # test for fe visible... 
#\r\n if field_label.is_displayed(): # proceed to consider entering data #\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, label_xpath, 'label xpath')\r\n ScreenControlDuringExecution.scroll_to(driver, field_name, label_xpath)\r\n logging.info('{ord} {label} is visible'.format(ord=field_order, label=field_name))\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, field_order + ' ' + field_name,\r\n 'visible')\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, excl_opt_val_w_caret, 'excl_opt_val_w_caret')\r\n excl_opt_set_trimmed = excl_opt_val_w_caret[1:-1] # takes off extremity single quotes\r\n excl_opt_vals_w_caret = excl_opt_set_trimmed.split(\"', '\") # splits out interior single quotes\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, excl_opt_vals_w_caret,\r\n 'excl_opt_vals_w_caret')\r\n\r\n excl_opts = []\r\n excl_opt_xpaths = []\r\n opts_xpath = '''//input[@label=\"''' + field_name + '''\"]'''\r\n for e_o_v_w_caret in excl_opt_vals_w_caret:\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, e_o_v_w_caret, 'e_o_v_w_caret')\r\n excl_opt_val = \"'\" + e_o_v_w_caret.replace('^', '') + \"'\" # re-wrap in single quotes now\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, excl_opt_val, 'excl_opt_val')\r\n excl_opts.append(excl_opt_val)\r\n\r\n for excl_opt_val in excl_opts:\r\n excl_opt_xpath = opts_xpath + '''[@value=\"''' + excl_opt_val + '''\"]'''\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, excl_opt_xpath,\r\n 'excl_opt_xpath')\r\n excl_opt_xpaths.append(excl_opt_xpath)\r\n\r\n # process the multiple options proposed for entry ready for looping through them #\r\n field_value = field_value[1:]\r\n if field_value[-1:] == \"'\":\r\n field_value = field_value[:-1]\r\n field_value_as_list = field_value.split(\"', '\")\r\n list_of_stored_values = field_value_as_list # 'stored' here refers to what was stored on the 'seed' form\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, list_of_stored_values, 'list of vals')\r\n # loop through the items in the list of values #\r\n for value_to_be_selected in list_of_stored_values:\r\n # quote-wrap the string\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, value_to_be_selected,\r\n 'raw value_to_be_selected')\r\n value_to_be_selected = \"'\" + value_to_be_selected + \"'\"\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, value_to_be_selected,\r\n 'value_to_be_selected')\r\n\r\n # if other - process to separate the two parts (other & freetext)\r\n if value_to_be_selected[:9] == \"\"\"'Other', \"\"\":\r\n other_free_text_value = value_to_be_selected[9:-1] # the free text part\r\n value_to_be_selected = value_to_be_selected[:7] # the conformed 'other' part\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, other_free_text_value,\r\n 'other_free_text_value')\r\n # establish the free text element\r\n free_text_xpath = label_parent_div_xpath + '''//td[2]//div/input'''\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, free_text_xpath, 'free_text_xpath')\r\n\r\n # begin clicking the other option...\r\n try:\r\n # set up the individual option checkbox\r\n xpath = opts_xpath + '''[@value=\"''' + value_to_be_selected + '''\"]'''\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, xpath, 'xpath')\r\n box_to_be_checked = driver.find_element_by_xpath(xpath)\r\n\r\n # if already checked, skip it\r\n if 
box_to_be_checked.is_selected():\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, value_to_be_selected,\r\n 'already selected')\r\n pass\r\n\r\n # check the box\r\n else:\r\n box_to_be_checked.click()\r\n\r\n # update the free text part\r\n free_text_element = driver.find_element_by_xpath(free_text_xpath)\r\n free_text_element.clear()\r\n free_text_element.send_keys(other_free_text_value)\r\n except:\r\n raise NoSuchElementException(\"Could not select 'Other' or its free text element\")\r\n else:\r\n xpath = '''//input[@label=\"''' + field_name + '''\"][@value=\"''' + value_to_be_selected + '''\"]'''\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, xpath, 'xpath')\r\n if value_to_be_selected != \"\" and value_to_be_selected != \"''\":\r\n box_to_be_checked = driver.find_element_by_xpath(xpath)\r\n if box_to_be_checked.is_selected():\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, value_to_be_selected,\r\n 'already selected')\r\n pass\r\n else:\r\n # remove excl opt if selected\r\n for excl_opt_xpath in excl_opt_xpaths:\r\n excl_opt_el = driver.find_element_by_xpath(excl_opt_xpath)\r\n if excl_opt_el.is_selected():\r\n excl_opt_el.click()\r\n\r\n # add options required on this execution\r\n box_to_be_checked.click()\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, value_to_be_selected,\r\n 'now selected')\r\n else:\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, value_to_be_selected,\r\n 'no value to be selected')\r\n pass\r\n # except:\r\n # raise NoSuchElementException(\"Could not select the checkbox in time\")\r\n ScreenControlDuringExecution.set_speed(driver, run_speed, field_value)\r\n folder_name = form_name[:9]\r\n picture_filename = form_name + \"_\" + field_order + \"_\" + field_name + '_' + field_id + \"_\" + field_info\r\n ScreenOutputLogging.Logging.save_screenshot_picture_stringbased(driver, folder_name, picture_filename)\r\n logging.info('{ord} - finished processing'.format(ord=field_order + \"_\" + field_id))\r\n except:\r\n raise NoSuchElementException(\"Could not find field element in time\")\r\n\r\n\r\n# #####################################################################################################################\r\ndef s_table_in_line_editor_67(driver, run_speed, field_id, field_order, field_info, field_name, field_value,\r\n print_log_setting=1, field_column_attributes=\"\"\"\"\"\", form_name='Test_Form'):\r\n \"\"\"This takes a field data specification for a TILE, logs the action and enters the data\"\"\"\r\n logging.basicConfig(filename='example.log', filemode='w',\r\n level=logging.INFO) # write over existing log in this file\r\n ScreenOutputLogging.pretty_log_field('start', field_order, field_id, field_name, field_info,\r\n print_log_setting)\r\n ScreenOutputLogging.pretty_log_data_value(field_value, print_log_setting)\r\n if field_value != 'NULL':\r\n try:\r\n label_xpath = '''//div/b/span[.=\"''' + field_name + '''\"]'''\r\n field_label = driver.find_element_by_xpath(label_xpath)\r\n # test for fe visible... 
#\r\n # print('found TILE field label')\r\n if field_label.is_displayed(): # proceed to consider entering data #\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, label_xpath, 'label xpath')\r\n ScreenControlDuringExecution.scroll_to(driver, field_name, label_xpath)\r\n logging.info('{ord} {label} is visible'.format(ord=field_order, label=field_name))\r\n\r\n\r\n # first, interrogate the existing table rows (page rows) to see where to begin new ones\r\n # (ie augment the source row count)\r\n rows_xpath = '''//div[b/span[.=\"''' + field_name \\\r\n + '''\"]]/following-sibling::div//table/tbody/tr//table/tbody/tr'''\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 3, rows_xpath, 'rows_xpath')\r\n # capture existing stored data on the form (page data) # ###########################################\r\n set_of_existing_data_rows_in_tile = driver.find_elements_by_xpath(rows_xpath)\r\n text_of_page_rows = []\r\n page_table_data = []\r\n adj = 0\r\n row_index = 0\r\n for row_element in set_of_existing_data_rows_in_tile:\r\n page_row_text = row_element.text\r\n text_of_page_rows.append(page_row_text)\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 3, text_of_page_rows, 'text_of_rows')\r\n if page_row_text == '''+ x''':\r\n adj = -1 # if a tile row is empty, take it off the overall size of the table\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 3, page_row_text, 'page_row_text')\r\n row_index += 1\r\n row_xpath = rows_xpath + '''[''' + str(row_index) + ''']'''\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 3, row_xpath, 'row_xpath')\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 3, row_element.get_attribute('innerHTML'),\r\n 'row_element')\r\n set_of_existing_data_cells_in_row = driver.find_elements_by_xpath(row_xpath + '''/td[\r\n @class=\"editable\"]''')\r\n page_row_data = []\r\n cell_index = 0\r\n for cell_element in set_of_existing_data_cells_in_row:\r\n cell_index += 1\r\n cell_xpath = row_xpath + '''[''' + str(cell_index) + ''']'''\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 3, cell_xpath, 'cell_xpath')\r\n # build comparable spec to incoming data (NB - this data format is now outdated - came from SQL)\r\n id_page_cell_data = '''[''' + str(row_index) + ''']|[''' + str(cell_index) + ''']|''' + \\\r\n cell_element.text\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 3, id_page_cell_data,\r\n 'page_cell_data')\r\n page_cell_data = cell_element.text\r\n page_row_data.append(page_cell_data)\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, page_row_data, 'page_row_data')\r\n page_table_data.append(page_row_data)\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, page_table_data, 'page_table_data')\r\n # ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, row2_text, 'row2_text')\r\n number_of_existing_data_rows_in_tile = len(set_of_existing_data_rows_in_tile)\r\n # augment row count with this number\r\n row_augment = number_of_existing_data_rows_in_tile + adj\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 1, row_augment, 'row_augment')\r\n\r\n\r\n\r\n print('boo')\r\n # process the column attributes (inc data types) #\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 3, field_column_attributes,\r\n 'field_column_attributes')\r\n field_col_attr_as_list = field_column_attributes[1]\r\n list_of_attributes = field_col_attr_as_list\r\n 
ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, list_of_attributes, 'list_of_attributes')\r\n\r\n bee = '''\"39268|1|Past medical history and ACE comorbidities|autocomplete|\", \"39268|2|Date (\r\n optional)|date|\"'''\r\n print(bee)\r\n\r\n\r\n # process the multiple options to be entered ready for looping through them #\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, field_value, 'field_value')\r\n\r\n htmldata = xmltodict.parse(field_value)\r\n print('table')\r\n print(htmldata['table'])\r\n table_id = htmldata['table']['@id']\r\n print(table_id)\r\n field_id_from_table = table_id.replace('proxytable', '') # 39268proxytable\r\n print(field_id_from_table)\r\n rows_list = []\r\n row_cnt = 0\r\n cell_cnt = 0\r\n rows = htmldata['table']['tbody']['tr']\r\n print(rows)\r\n if not isinstance(rows, list):\r\n print('rows was not a list - only a single row exists in data to be submitted')\r\n # make it into a list of length 1\r\n rows_to_iterate = list(rows)\r\n print(rows_to_iterate)\r\n print('A')\r\n else:\r\n rows_to_iterate = rows\r\n print('rows is a list - more than one row exists in data to be submitted')\r\n print(rows_to_iterate)\r\n print('B')\r\n print('C')\r\n for row in rows_to_iterate: # now happily iterate knowing that singleton rows are also in lists\r\n row_cnt += 1\r\n print(row)\r\n print('D')\r\n tds = row['td']\r\n cells_list = []\r\n cell_cnt = 0\r\n print('E')\r\n if not isinstance(tds, list):\r\n print('tds was not a list - only a single col exists in the data to be submitted')\r\n # make it into a list of length 1\r\n tds_to_iterate = list(tds)\r\n else:\r\n print('tds is a list - more than one col exists in the data to be submitted')\r\n tds_to_iterate = tds\r\n print('tds_to_iterate: {x}'.format(x=tds_to_iterate))\r\n for td in tds_to_iterate: # now happily iterate knowing that singleton cols are also in lists\r\n print('td: {x}'.format(x=td))\r\n cell_cnt += 1\r\n td_text = ''\r\n if '#text' in td:\r\n td_text = td['#text']\r\n print(td_text)\r\n td_detail = '[' + str(row_cnt) + ']|[' + str(cell_cnt) + ']|' + td_text\r\n cells_list.append(td_detail)\r\n print(cells_list)\r\n rows_list.append(cells_list)\r\n print('row done')\r\n\r\n print('table done')\r\n print('row count: {x}'.format(x=row_cnt))\r\n print('col count: {x}'.format(x=cell_cnt))\r\n print(rows_list)\r\n\r\n print('booyah')\r\n\r\n\r\n # listing of values to store\r\n # trimmed_value = field_value[1:-1] # remove leading and trailing double quotes\r\n # split_value = trimmed_value.split('\", \"')\r\n list_of_values_to_store = rows_list #split_value\r\n\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, list_of_values_to_store,\r\n 'list_of_values_to_store')\r\n # derive size of table - rows and cols\r\n # (cols for position of new row button, rows because you get that first...) 
#\r\n last_row_last_col = list_of_values_to_store[-1][-1]\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, last_row_last_col, 'l row l col')\r\n\r\n last_row = (last_row_last_col[:(last_row_last_col.index('|')+1)])\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 3, last_row, 'last_row')\r\n last_row = last_row[1:-2]\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, last_row, 'last_row')\r\n last_row_last_col = last_row_last_col[(last_row_last_col.index('|')+1):]\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, last_row_last_col, 'last_row_last_col')\r\n last_col = (last_row_last_col[:(last_row_last_col.index('|'))])[1:-1]\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, last_col, 'last_col')\r\n\r\n last_row = str(row_cnt)\r\n last_col = str(cell_cnt)\r\n\r\n add_row_button_col = str(int(last_col)+1)\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, last_row + ' ' + last_col, 'rows n cols')\r\n print('')\r\n # loop through the items in the list of values #\r\n\r\n row_counter = row_augment # start at end of existing rows\r\n\r\n\r\n # for row_to_be_entered in table_collector:\r\n for row_to_be_entered in rows_list:\r\n print('')\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 1, row_to_be_entered,\r\n 'row_to_be_entered')\r\n cell_counter = 1\r\n # use the col number to locate the correct attribute list member #\r\n this_col_attr_data = list_of_attributes[cell_counter]\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 3, this_col_attr_data,\r\n 'this_col_attr_data')\r\n right_one = this_col_attr_data[this_col_attr_data.index('|')+1:]\r\n right_two = right_one[right_one.index('|')+1:]\r\n right_thr = right_two[right_two.index('|')+1:]\r\n this_col_data_type = right_thr[:right_thr.index('|')]\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 3, this_col_data_type,\r\n 'this_col_data_type')\r\n\r\n tbody_xpath = '''//div[@label=\"''' + field_name + '''\"]//tbody'''\r\n # ready to enter data (and add rows to do that)\r\n # but consider whether this row needs adding...\r\n if row_to_be_entered in page_table_data:\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 1, row_to_be_entered,\r\n 'already there')\r\n else:\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 1, row_to_be_entered,\r\n 'not already there')\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 1, row_counter,\r\n 'row_counter for row-add button')\r\n\r\n row_xpath_for_add_row_button = tbody_xpath + '''//tr[''' + str(row_counter) + ''']'''\r\n add_row_xpath = row_xpath_for_add_row_button + '''/td[''' + add_row_button_col +\\\r\n ''']/span[@title=\"Insert row below\"]'''\r\n row_counter += 1\r\n row_xpath = tbody_xpath + '''//tr[''' + str(row_counter) + ''']'''\r\n\r\n if row_counter > 1:\r\n # add_row_xpath = '''//div[@label=\"''' + field_name + '''\"]//tbody//tr[''' + str(row_counter) +\\\r\n # ''']/td[''' + add_row_button_col + ''']/span[@title=\"Insert row below\"]'''\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 1, add_row_xpath,\r\n 'add_row_xpath')\r\n add_new_row_button = driver.find_element_by_xpath(add_row_xpath)\r\n # time.sleep(5)\r\n add_new_row_button.click()\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 1, 'clicked...',\r\n 'add_row_button')\r\n # time.sleep(1)\r\n if print_log_setting >= 2:\r\n print('added new TILE row - row {R}'.format(R=row_counter+1))\r\n print('processing row 
{R} col {C}'.format(R=row_counter+1, C=cell_counter))\r\n for cell_value in row_to_be_entered:\r\n value_to_be_entered = row_to_be_entered[cell_counter-1]\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 1, cell_counter,\r\n 'cell_counter')\r\n # [1]|[1]|Aortobifemoral bypass\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 1, value_to_be_entered,\r\n 'cell value_to_be_entered')\r\n\r\n right_a = value_to_be_entered[value_to_be_entered.index('|')+1:]\r\n print(right_a)\r\n right_b = right_a[right_a.index('|')+1:]\r\n print(right_b)\r\n act_val_to_use = right_b\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 1, act_val_to_use,\r\n 'cell act_val_to_use')\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 2, cell_value,\r\n 'test cell_value?')\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 1, this_col_data_type,\r\n 'cell this_col_data_type')\r\n if act_val_to_use != '':\r\n cell_to_process_xpath = row_xpath + '''/td[''' + str(cell_counter) + ''']'''\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 1,\r\n cell_to_process_xpath,\r\n 'cell_to_process_xpath')\r\n if this_col_data_type == 'autocomplete':\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 1,\r\n this_col_data_type,\r\n 'this_col_data_type')\r\n try:\r\n cell_to_process = driver.find_element_by_xpath(cell_to_process_xpath)\r\n tile_actions = ActionChains(driver).click_and_hold(cell_to_process)\\\r\n .release(cell_to_process).send_keys(act_val_to_use)\r\n tile_actions.perform()\r\n tile_match_xpath = '''//ul/li/a[.=\"''' + act_val_to_use + '''\"]'''\r\n if Utils.is_element_present(driver, 'xpath', tile_match_xpath):\r\n try:\r\n print('see if there is a match option')\r\n element = WebDriverWait(driver, 3).until(\r\n EC.visibility_of_element_located((By.XPATH, tile_match_xpath))\r\n )\r\n finally:\r\n print('waited for match options')\r\n driver.implicitly_wait(3)\r\n try:\r\n print('click on the match option')\r\n data_value = driver.find_element_by_xpath(tile_match_xpath)\r\n data_value.click()\r\n finally:\r\n print('tried to click on match option')\r\n driver.implicitly_wait(3)\r\n try:\r\n print('wait for the match options to disappear')\r\n element = WebDriverWait(driver, 3).until(\r\n EC.invisibility_of_element_located((By.XPATH, tile_match_xpath))\r\n )\r\n finally:\r\n print('have waited for options to disappear')\r\n # print('TILE widget is hidden again')\r\n driver.implicitly_wait(3)\r\n finally:\r\n print('no match element - just carry on')\r\n # except:\r\n # raise NoSuchElementException(\"Could not select the TILE cell autocomplete option in time\")\r\n elif this_col_data_type == 'dropdown':\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 1,\r\n this_col_data_type,\r\n 'this_col_data_type')\r\n try:\r\n cell_to_process = driver.find_element_by_xpath(cell_to_process_xpath)\r\n tile_actions = ActionChains(driver).click_and_hold(cell_to_process) \\\r\n .release(cell_to_process).send_keys(act_val_to_use)\r\n tile_actions.perform()\r\n print('sent keys')\r\n # match_xpath = '''//ul/li/a[.=\"''' + value_to_be_entered + '''\"]'''\r\n match_xpath = '''//div[@id=\"select2-drop\"]/ul/li[.=\"''' + act_val_to_use \\\r\n + '''\"]'''\r\n if Utils.is_element_present(driver, 'xpath', match_xpath):\r\n try:\r\n element = WebDriverWait(driver, 3).until(\r\n EC.visibility_of_element_located((By.XPATH, match_xpath))\r\n )\r\n element.click()\r\n # finally:\r\n # driver.implicitly_wait(3)\r\n # try:\r\n element = 
WebDriverWait(driver, 3).until(\r\n EC.invisibility_of_element_located((By.XPATH, match_xpath))\r\n )\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 1,\r\n element,\r\n 'now hidden')\r\n finally:\r\n # tile_actions = ActionChains(driver).click_and_hold(cell_to_process) \\\r\n # .release(cell_to_process).send_keys(Keys.ENTER)\r\n # tile_actions.perform()\r\n print('tried match list work')\r\n # driver.implicitly_wait(15)\r\n else:\r\n print('no match list, move on')\r\n except:\r\n raise NoSuchElementException(\r\n \"Could not select the TILE cell dropdown option in time\")\r\n elif this_col_data_type == 'date':\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 1,\r\n this_col_data_type,\r\n 'this_col_data_type')\r\n try:\r\n cell_to_process = driver.find_element_by_xpath(cell_to_process_xpath)\r\n tile_actions = ActionChains(driver).click_and_hold(cell_to_process) \\\r\n .release(cell_to_process).send_keys(act_val_to_use).send_keys(\r\n Keys.ENTER).send_keys(Keys.RETURN)\r\n tile_actions.perform()\r\n\r\n except:\r\n raise NoSuchElementException(\r\n \"Could not select the TILE cell date option in time\")\r\n elif this_col_data_type == 'text':\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 1,\r\n this_col_data_type,\r\n 'this_col_data_type')\r\n try:\r\n cell_to_process = driver.find_element_by_xpath(cell_to_process_xpath)\r\n tile_actions = ActionChains(driver).click_and_hold(cell_to_process) \\\r\n .release(cell_to_process).send_keys(act_val_to_use)\r\n tile_actions.perform()\r\n except:\r\n raise NoSuchElementException(\"Could not select the TILE cell text option in time\")\r\n else:\r\n print('TILE column type not catered for in python code !!')\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 1,\r\n this_col_data_type,\r\n 'this_col_data_type')\r\n\r\n cell_counter += 1\r\n print('')\r\n print('extra spurious row-add for removing date widgets')\r\n row_xpath = tbody_xpath + '''//tr[''' + str(row_counter) + ''']'''\r\n add_row_xpath = row_xpath + '''/td[''' + add_row_button_col + \\\r\n ''']/span[@title=\"Insert row below\"]'''\r\n add_new_row_button = driver.find_element_by_xpath(add_row_xpath)\r\n # time.sleep(5)\r\n add_new_row_button.click()\r\n ScreenOutputLogging.pretty_log_field_step(print_log_setting, 1, 'clicked...',\r\n 'add_row_button')\r\n ScreenControlDuringExecution.set_speed(driver, run_speed, field_value)\r\n folder_name = form_name[:9]\r\n picture_filename = form_name + \"_\" + field_order + \"_\" + field_name + '_' + field_id + \"_\" + field_info\r\n ScreenOutputLogging.Logging.save_screenshot_picture_stringbased(driver, folder_name, picture_filename)\r\n logging.info('{ord} - finished processing'.format(ord=field_order + \"_\" + field_id))\r\n except:\r\n raise NoSuchElementException(\"Could not find field element in time\")\r\n else:\r\n print('no data to enter - move on')\r\n","sub_path":"EFields/ Multiselect.py","file_name":" Multiselect.py","file_ext":"py","file_size_in_byte":51674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"536008104","text":"# coding: utf-8\n# Copyright 2017 challenger.ai\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed 
on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Build tfrecord data.\"\"\"\n# python2.7\n# __author__ = 'WANG, Heda'\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import Counter\nfrom collections import namedtuple\nfrom datetime import datetime\nimport json\nimport os.path\nimport random\nimport sys\nimport base64\n\nreload(sys)\nsys.setdefaultencoding('utf8')\nimport threading\nimport jieba\nimport numpy as np\nimport tensorflow as tf\n\n# input data\ntf.flags.DEFINE_string(\"train_image_dir\", \"data/ai_challenger_caption_train_20170902/caption_train_images_20170902\",\n \"Training image directory.\")\ntf.flags.DEFINE_string(\"train_captions_file\", \"data/ai_challenger_caption_train_20170902/caption_train_annotations_20170902.json\",\n \"Training captions JSON file.\")\ntf.flags.DEFINE_string(\"train_localizations_file\", \"data/bottom_up_attention/aichallenger_train.tsv.small\",\n \"Training captions TSV file.\")\n\ntf.flags.DEFINE_string(\"validate_image_dir\", \"data/ai_challenger_caption_validation_20170910/caption_validation_images_20170910\",\n \"Validation image directory.\")\ntf.flags.DEFINE_string(\"validate_localizations_file\", \"data/bottom_up_attention/aichallenger_validate.tsv.small\",\n \"Validating captions TSV file.\")\n\ntf.flags.DEFINE_string(\"test1_image_dir\", \"data/ai_challenger_caption_test1_20170923/caption_test1_images_20170923\",\n \"Test image directory.\")\ntf.flags.DEFINE_string(\"test1_localizations_file\", \"data/bottom_up_attention/aichallenger_test1.tsv.small\",\n \"Test captions TSV file.\")\n\ntf.flags.DEFINE_string(\"test2_image_dir\", \"data/ai_challenger_caption_test_b_20171120/caption_test_b_images_20171120\",\n \"Test image directory.\")\ntf.flags.DEFINE_string(\"test2_localizations_file\", \"data/bottom_up_attention/aichallenger_test2.tsv.small\",\n \"Test captions TSV file.\")\n\n\n# use existing word counts file\ntf.flags.DEFINE_string(\"word_counts_input_file\",\n \"\",\n \"If defined, use existing word_counts_file.\")\n\n# output files\ntf.flags.DEFINE_string(\"output_dir\", \"data/Loc_TFRecord_data\", \"Output directory for tfrecords.\")\ntf.flags.DEFINE_string(\"word_counts_output_file\",\n \"data/word_counts.txt\",\n \"Output vocabulary file of word counts.\")\n\n# words parameters\ntf.flags.DEFINE_string(\"start_word\", \"\",\n \"Special word added to the beginning of each sentence.\")\ntf.flags.DEFINE_string(\"end_word\", \"\",\n \"Special word added to the end of each sentence.\")\ntf.flags.DEFINE_string(\"unknown_word\", \"\",\n \"Special word meaning 'unknown'.\")\n\n# the minimum word count\ntf.flags.DEFINE_integer(\"min_word_count\", 4,\n \"The minimum number of occurrences of each word in the \"\n \"training set for inclusion in the vocabulary.\")\n\n# threads\ntf.flags.DEFINE_integer(\"num_threads\", 8,\n \"Number of threads to preprocess the images.\")\n\n# sharding parameters\ntf.flags.DEFINE_integer(\"train_shards\", 280,\n \"Number of shards in training TFRecord files.\")\ntf.flags.DEFINE_integer(\"validate_shards\", 8,\n \"Number of shards in validation TFRecord files.\")\ntf.flags.DEFINE_integer(\"test1_shards\", 8,\n \"Number of shards in testing TFRecord files.\")\ntf.flags.DEFINE_integer(\"test2_shards\", 8,\n \"Number of shards in testing TFRecord 
files.\")\n\ntf.flags.DEFINE_boolean(\"build_flip_caption\", False,\n \"Whether to generate flip caption. If True, only build train set,\"\n \"If set False, build train and dev set\")\n\ntf.flags.DEFINE_integer(\"max_ref_length\", 30,\n \"Maximum caption length.\")\ntf.flags.DEFINE_integer(\"num_refs\", 5,\n \"Number of references per image.\")\n\ntf.flags.DEFINE_string(\"task\", \"train\",\n \"Options are train/validate/test1/test2.\")\n\nFLAGS = tf.flags.FLAGS\n\n\nImageMetadata = namedtuple(\"ImageMetadata\",\n [\"id\", \"filename\", \"base_filename\", \"localization\", \"captions\", \"flip_captions\"])\n\n# functions to flip caption\ndef find_all(string, query):\n # return all positions\n query_len = len(query)\n positions = []\n beg = 0\n pos = string.find(query, beg)\n while pos != -1:\n positions.append(pos)\n beg = pos + query_len\n pos = string.find(query, beg)\n return positions\n\ndef func_flip_caption(caption):\n lr_pos = find_all(caption, u\"左右\")\n noflip_pos = []\n for pos in lr_pos:\n noflip_pos.append(pos)\n noflip_pos.append(pos + 1)\n l_pos = find_all(caption, u\"左\")\n l_pos = [pos for pos in l_pos if pos not in noflip_pos]\n\n r_pos = find_all(caption, u\"右\")\n r_pos = [pos for pos in r_pos if pos not in noflip_pos]\n\n if not l_pos and not r_pos:\n return caption\n\n new_caption = \"\"\n for i,c in enumerate(caption):\n if i in l_pos:\n new_caption += u\"右\"\n elif i in r_pos:\n new_caption += u\"左\"\n else:\n new_caption += c\n return new_caption\n\n\nclass Vocabulary(object):\n \"\"\"Simple vocabulary wrapper.\"\"\"\n\n def __init__(self, vocab, unk_id):\n \"\"\"Initializes the vocabulary.\n Args:\n vocab: A dictionary of word to word_id.\n unk_id: Id of the special 'unknown' word.\n \"\"\"\n self._vocab = vocab\n self._unk_id = unk_id\n\n def word_to_id(self, word):\n \"\"\"Returns the integer id of a word string.\"\"\"\n if word in self._vocab:\n return self._vocab[word]\n else:\n return self._unk_id\n\ndef load_vocab(vocab_file):\n if not tf.gfile.Exists(vocab_file):\n print(\"Vocab file %s not found.\", vocab_file)\n exit()\n print(\"Initializing vocabulary from file: %s\", vocab_file)\n\n with tf.gfile.GFile(vocab_file, mode=\"r\") as f:\n reverse_vocab = list(f.readlines())\n reverse_vocab = [line.split()[0].decode('utf-8') for line in reverse_vocab]\n assert FLAGS.start_word in reverse_vocab\n assert FLAGS.end_word in reverse_vocab\n assert FLAGS.unknown_word not in reverse_vocab\n\n unk_id = len(reverse_vocab)\n vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])\n vocab = Vocabulary(vocab_dict, unk_id)\n return vocab\n\n\n\nclass ImageDecoder(object):\n \"\"\"Helper class for decoding images in TensorFlow.\"\"\"\n\n def __init__(self):\n # Create a single TensorFlow Session for all image decoding calls.\n self._sess = tf.Session()\n\n # TensorFlow ops for JPEG decoding.\n self._encoded_jpeg = tf.placeholder(dtype=tf.string)\n self._decode_jpeg = tf.image.decode_jpeg(self._encoded_jpeg, channels=3)\n\n def decode_jpeg(self, encoded_jpeg):\n image = self._sess.run(self._decode_jpeg,\n feed_dict={self._encoded_jpeg: encoded_jpeg})\n assert len(image.shape) == 3\n assert image.shape[2] == 3\n return image\n\n\ndef _int64_feature(value):\n \"\"\"Wrapper for inserting an int64 Feature into a SequenceExample proto.\"\"\"\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef _bytes_feature(value):\n \"\"\"Wrapper for inserting a bytes Feature into a SequenceExample proto.\"\"\"\n return 
tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)]))\n\ndef _int64_list(value):\n    \"\"\"Wrapper for inserting an int64 Feature into a SequenceExample proto.\"\"\"\n    return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\ndef _float_list(value):\n    \"\"\"Wrapper for inserting a float Feature into a SequenceExample proto.\"\"\"\n    return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\ndef _bytes_list(value):\n    \"\"\"Wrapper for inserting a bytes Feature into a SequenceExample proto.\"\"\"\n    return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))\n\ndef _int64_feature_list(values):\n    \"\"\"Wrapper for inserting an int64 FeatureList into a SequenceExample proto.\"\"\"\n    return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])\n\ndef _bytes_feature_list(values):\n    \"\"\"Wrapper for inserting a bytes FeatureList into a SequenceExample proto.\"\"\"\n    return tf.train.FeatureList(feature=[_bytes_feature(v) for v in values])\n\ndef pad_or_truncate(captions, lengths):\n    max_length = FLAGS.max_ref_length\n    num_refs = FLAGS.num_refs\n    lengths = [min(l, max_length) for l in lengths]\n    captions = [c[:l] + [0] * (max_length - l) for c, l in zip(captions, lengths)]\n    if len(captions) < num_refs:\n        # Compute the pad count before extending captions; computing it after the\n        # extension always yields zero and leaves lengths unpadded.\n        num_missing = num_refs - len(captions)\n        captions = captions + [[0] * max_length for i in xrange(num_missing)]\n        lengths = lengths + [0] * num_missing\n    flat_captions = []\n    for c in captions:\n        flat_captions.extend(c)\n    assert len(flat_captions) == num_refs * max_length\n    assert len(lengths) == num_refs\n    return flat_captions, lengths\n\ndef _to_sequence_example(image, decoder, vocab):\n    \"\"\"Builds a SequenceExample proto for an image-caption pair.\n    Args:\n      image: An ImageMetadata object.\n      decoder: An ImageDecoder object.\n      vocab: A Vocabulary object.\n    Returns:\n      A SequenceExample proto.\n    \"\"\"\n    with tf.gfile.FastGFile(image.filename, \"r\") as f:\n        encoded_image = f.read()\n\n    try:\n        decoder.decode_jpeg(encoded_image)\n    except (tf.errors.InvalidArgumentError, AssertionError):\n        print(\"Skipping file with invalid JPEG data: %s\" % image.filename)\n        return\n\n    base_filename = image.base_filename\n    localization = image.localization\n    feature_list = {\n        \"image/id\": _int64_feature(image.id),\n        \"image/filename\": _bytes_feature(base_filename),\n        \"image/localization\": _float_list(localization),\n        \"image/data\": _bytes_feature(encoded_image),\n    }\n\n    if image.captions is not None:\n        caption_ids = [[vocab.word_to_id(word) for word in caption] for caption in image.captions]\n        caption_lengths = [len(caption) for caption in caption_ids]\n        flip_caption_ids = [[vocab.word_to_id(word) for word in caption] for caption in image.flip_captions]\n        flip_caption_lengths = [len(caption) for caption in flip_caption_ids]\n\n        caption_ids, caption_lengths = pad_or_truncate(caption_ids, caption_lengths)\n        flip_caption_ids, flip_caption_lengths = pad_or_truncate(flip_caption_ids, flip_caption_lengths)\n\n        feature_list.update({\n            \"image/ref_words\": _int64_list(caption_ids),\n            \"image/ref_lengths\": _int64_list(caption_lengths),\n            \"image/flipped_ref_words\": _int64_list(flip_caption_ids),\n            \"image/flipped_ref_lengths\": _int64_list(flip_caption_lengths),\n        })\n\n    features = tf.train.Features(feature=feature_list)\n    example = tf.train.Example(features=features)\n\n    return example\n\n\ndef _process_image_files(thread_index, ranges, name, images, decoder, vocab,\n                         num_shards):\n    \"\"\"Processes and saves a subset of images as TFRecord files in one thread.\n    Args:\n      
thread_index: Integer thread identifier within [0, len(ranges)].\n ranges: A list of pairs of integers specifying the ranges of the dataset to\n process in parallel.\n name: Unique identifier specifying the dataset.\n images: List of ImageMetadata.\n decoder: An ImageDecoder object.\n vocab: A Vocabulary object.\n num_shards: Integer number of shards for the output files.\n \"\"\"\n # Each thread produces N shards where N = num_shards / num_threads. For\n # instance, if num_shards = 128, and num_threads = 2, then the first thread\n # would produce shards [0, 64).\n num_threads = len(ranges)\n assert not num_shards % num_threads\n num_shards_per_batch = int(num_shards / num_threads)\n\n shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1],\n num_shards_per_batch + 1).astype(int)\n num_images_in_thread = ranges[thread_index][1] - ranges[thread_index][0]\n\n counter = 0\n for s in range(num_shards_per_batch):\n # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'\n shard = thread_index * num_shards_per_batch + s\n output_filename = \"%s-%.5d-of-%.5d.tfrecord\" % (name, shard, num_shards)\n output_file = os.path.join(FLAGS.output_dir, output_filename)\n writer = tf.python_io.TFRecordWriter(output_file)\n\n shard_counter = 0\n images_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)\n for i in images_in_shard:\n image = images[i]\n\n sequence_example = _to_sequence_example(image, decoder, vocab)\n if sequence_example is not None:\n writer.write(sequence_example.SerializeToString())\n shard_counter += 1\n counter += 1\n\n if not counter % 1000:\n print(\"%s [thread %d]: Processed %d of %d items in thread batch.\" %\n (datetime.now(), thread_index, counter, num_images_in_thread))\n sys.stdout.flush()\n\n writer.close()\n print(\"%s [thread %d]: Wrote %d image-caption pairs to %s\" %\n (datetime.now(), thread_index, shard_counter, output_file))\n sys.stdout.flush()\n shard_counter = 0\n print(\"%s [thread %d]: Wrote %d image-caption pairs to %d shards.\" %\n (datetime.now(), thread_index, counter, num_shards_per_batch))\n sys.stdout.flush()\n\n\ndef _process_dataset(name, images, vocab, num_shards):\n \"\"\"Processes a complete data set and saves it as a TFRecord.\n Args:\n name: Unique identifier specifying the dataset.\n images: List of ImageMetadata.\n vocab: A Vocabulary object.\n num_shards: Integer number of shards for the output files.\n \"\"\"\n # Break up each image into a separate entity for each caption.\n images = [ImageMetadata(image.id, image.filename, image.base_filename, image.localization, image.captions, image.flip_captions)\n for image in images]\n\n # Shuffle the ordering of images. Make the randomization repeatable.\n random.seed(12345)\n random.shuffle(images)\n\n # Break the images into num_threads batches. 
Batch i is defined as\n # images[ranges[i][0]:ranges[i][1]].\n num_threads = min(num_shards, FLAGS.num_threads)\n spacing = np.linspace(0, len(images), num_threads + 1).astype(np.int)\n ranges = []\n threads = []\n for i in range(len(spacing) - 1):\n ranges.append([spacing[i], spacing[i + 1]])\n\n # Create a mechanism for monitoring when all threads are finished.\n coord = tf.train.Coordinator()\n\n # Create a utility for decoding JPEG images to run sanity checks.\n decoder = ImageDecoder()\n\n # Launch a thread for each batch.\n print(\"Launching %d threads for spacings: %s\" % (num_threads, ranges))\n for thread_index in range(len(ranges)):\n args = (thread_index, ranges, name, images, decoder, vocab, num_shards)\n t = threading.Thread(target=_process_image_files, args=args)\n t.start()\n threads.append(t)\n\n # Wait for all the threads to terminate.\n coord.join(threads)\n print(\"%s: Finished processing all %d image-caption pairs in data set '%s'.\" %\n (datetime.now(), len(images), name))\n\n\ndef _create_vocab(captions):\n \"\"\"Creates the vocabulary of word to word_id.\n The vocabulary is saved to disk in a text file of word counts. The id of each\n word in the file is its corresponding 0-based line number.\n Args:\n captions: A list of lists of strings.\n Returns:\n A Vocabulary object.\n \"\"\"\n print(\"Creating vocabulary.\")\n counter = Counter()\n for c in captions:\n counter.update(c)\n print(\"Total words:\", len(counter))\n\n # Filter uncommon words and sort by descending count.\n word_counts = [x for x in counter.items() if x[1] >= FLAGS.min_word_count]\n word_counts.sort(key=lambda x: x[1], reverse=True)\n print(\"Words in vocabulary:\", len(word_counts))\n\n # Write out the word counts file.\n with tf.gfile.FastGFile(FLAGS.word_counts_output_file, \"w\") as f:\n f.write(\"\\n\".join([\"%s %d\" % (w, c) for w, c in word_counts]))\n print(\"Wrote vocabulary file:\", FLAGS.word_counts_output_file)\n\n # Create the vocabulary dictionary.\n reverse_vocab = [x[0] for x in word_counts]\n unk_id = len(reverse_vocab)\n vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])\n vocab = Vocabulary(vocab_dict, unk_id)\n\n return vocab\n\n\ndef _process_caption_jieba(caption):\n \"\"\"Processes a Chinese caption string into a list of tonenized words.\n Args:\n caption: A string caption.\n Returns:\n A list of strings; the tokenized caption.\n \"\"\"\n tokenized_caption = [FLAGS.start_word]\n tokenized_caption.extend(jieba.cut(caption, cut_all=False))\n tokenized_caption.append(FLAGS.end_word)\n return tokenized_caption\n\ndef _load_localization_file(localizations_file):\n loc_dict = {}\n with open(localizations_file) as F:\n for line in F:\n filename, width, height, num_boxes, box_str = line.strip().split()\n num_boxes = int(num_boxes)\n assert num_boxes == 36\n box_blob = base64.decodestring(box_str)\n box_array = np.frombuffer(box_blob, dtype=np.float32)\n assert len(box_array) == 4*num_boxes\n for i in xrange(0, len(box_array), 4):\n l1, u1, l2, u2 = box_array[i:i+4]\n assert l1 < l2\n assert u1 < u2\n loc_dict[filename] = box_array\n return loc_dict\n\ndef _load_and_process_metadata(captions_file, localizations_file, image_dir):\n \"\"\"Loads image metadata from a JSON file and processes the captions.\n Args:\n captions_file: Json file containing caption annotations.\n image_dir: Directory containing the image files.\n Returns:\n A list of ImageMetadata.\n \"\"\"\n loc_dict = _load_localization_file(localizations_file)\n image_id = set([])\n\n if captions_file is not 
None:\n id_to_captions = {}\n with open(captions_file, 'r') as f:\n caption_data = json.load(f)\n for data in caption_data:\n image_name = data['image_id'].split('.')[0]\n descriptions = data['caption']\n if image_name not in image_id:\n id_to_captions.setdefault(image_name, [])\n image_id.add(image_name)\n\n caption_num = len(descriptions)\n\n for i in range(caption_num):\n caption_temp = descriptions[i].strip().strip(\"。\").replace('\\n', '')\n if caption_temp != '':\n id_to_captions[image_name].append(caption_temp)\n print(\"Loaded caption metadata for %d images from %s and image_id num is %s\" %\n (len(id_to_captions), captions_file, len(image_id)))\n else:\n id_to_captions = None\n for filename in os.listdir(image_dir):\n if filename.endswith(\".jpg\"):\n image_name = filename.split(\".\")[0]\n if image_name not in image_id:\n image_id.add(image_name)\n\n # Process the captions and combine the data into a list of ImageMetadata.\n print(\"Proccessing captions.\")\n image_metadata = []\n num_captions = 0\n id = 0\n for base_filename in image_id:\n localization = loc_dict[base_filename]\n filename = os.path.join(image_dir, base_filename + '.jpg')\n if id_to_captions is not None:\n captions = [_process_caption_jieba(c) for c in id_to_captions[base_filename]]\n flip_captions = [_process_caption_jieba(func_flip_caption(c)) for c in id_to_captions[base_filename]]\n num_captions += len(captions)\n else:\n captions = None\n flip_captions = None\n image_metadata.append(ImageMetadata(id, filename, base_filename, localization, captions, flip_captions))\n id = id + 1\n print(\"Finished processing %d captions for %d images in %s\" %\n (num_captions, len(image_id), captions_file))\n return image_metadata\n\n\ndef main(unused_argv):\n def _is_valid_num_shards(num_shards):\n \"\"\"Returns True if num_shards is compatible with FLAGS.num_threads.\"\"\"\n return num_shards < FLAGS.num_threads or not num_shards % FLAGS.num_threads\n\n assert _is_valid_num_shards(FLAGS.train_shards), (\n \"Please make the FLAGS.num_threads commensurate with FLAGS.train_shards\")\n assert _is_valid_num_shards(FLAGS.validate_shards), (\n \"Please make the FLAGS.num_threads commensurate with FLAGS.validate_shards\")\n assert _is_valid_num_shards(FLAGS.test1_shards), (\n \"Please make the FLAGS.num_threads commensurate with FLAGS.test1_shards\")\n assert _is_valid_num_shards(FLAGS.test2_shards), (\n \"Please make the FLAGS.num_threads commensurate with FLAGS.test2_shards\")\n\n if not tf.gfile.IsDirectory(FLAGS.output_dir):\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n if FLAGS.task == \"train\":\n # Load image metadata from caption files.\n train_dataset = _load_and_process_metadata(FLAGS.train_captions_file,\n FLAGS.train_localizations_file,\n FLAGS.train_image_dir)\n\n # Create vocabulary from the training captions.\n vocab = load_vocab(FLAGS.word_counts_input_file)\n _process_dataset(\"train\", train_dataset, vocab, FLAGS.train_shards)\n\n elif FLAGS.task == \"validate\":\n # Load image metadata from caption files.\n validate_dataset = _load_and_process_metadata(None,\n FLAGS.validate_localizations_file,\n FLAGS.validate_image_dir)\n\n # Create vocabulary from the training captions.\n vocab = load_vocab(FLAGS.word_counts_input_file)\n _process_dataset(\"validate\", validate_dataset, vocab, FLAGS.validate_shards)\n\n elif FLAGS.task == \"test1\":\n # Load image metadata from caption files.\n test1_dataset = _load_and_process_metadata(None,\n FLAGS.test1_localizations_file,\n FLAGS.test1_image_dir)\n\n # Create vocabulary from 
the training captions.\n vocab = load_vocab(FLAGS.word_counts_input_file)\n _process_dataset(\"test1\", test1_dataset, vocab, FLAGS.test1_shards)\n\n elif FLAGS.task == \"test2\":\n # Load image metadata from caption files.\n test2_dataset = _load_and_process_metadata(None,\n FLAGS.test2_localizations_file,\n FLAGS.test2_image_dir)\n\n # Create vocabulary from the training captions.\n vocab = load_vocab(FLAGS.word_counts_input_file)\n _process_dataset(\"test2\", test2_dataset, vocab, FLAGS.test2_shards)\n\nif __name__ == \"__main__\":\n tf.app.run()\n","sub_path":"scripts/build_localization_tfrecords.py","file_name":"build_localization_tfrecords.py","file_ext":"py","file_size_in_byte":23801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"261476681","text":"from stage import *\nfrom cards import *\nimport random\n\nclass Game:\n '''Classic game of War'''\n\n def info():\n return {'name' : 'War', 'players' : '2'}\n\n def __init__(self,engine,numPlayers):\n engine.registerPhases(\"play war next lose win\".split())\n engine.setPhase(\"play\")\n self.decks = [Deck() for p in range(1,numPlayers+1)]\n stdDeck().shuffle().deal(self.decks, 27)\n for p in range(1, numPlayers+1):\n engine.registerZone('battle',p)\n engine.registerZone('war',p)\n print(\"**\",p)\n engine.registerDeck(self.decks[p-1],p)\n self.numPlayers = numPlayers\n engine.setTurn(Turn(1))\n self.playersPlayed=0\n engine.ended = False\n\n def play(self,engine):\n '''Handle the play phase'''\n if self.playersPlayed == self.numPlayers:\n self.playersPlayed = 0\n c1 = engine.browseZone('battle', 1)[0]\n c2 = engine.browseZone('battle', 2)[0]\n if( c1.rank > c2.rank ):\n engine.ui.status('Player 1 won the battle')\n engine.unplay(c1,'battle',1)\n engine.unplay(c2,'battle',2)\n tmp = [c1,c2]\n random.shuffle(tmp)\n engine.placeOnBottom(1,'deck', tmp)\n engine.setPhase(\"next\")\n elif( c2.rank > c1.rank ):\n engine.ui.status('Player 2 won the battle')\n engine.unplay(c1,'battle',1)\n engine.unplay(c2,'battle',2)\n tmp = [c1,c2]\n random.shuffle(tmp)\n engine.placeOnBottom(2,'deck', tmp)\n engine.setPhase(\"next\")\n else:\n engine.ui.status('The battle is a draw!')\n engine.setPhase(\"war\")\n else:\n def playCard(e=engine,s=self):\n card = e.draw(e.turn.player)\n e.play(e.draw(e.turn.player),'battle',e.turn.player)\n s.playersPlayed += 1\n e.setTurn(Turn((e.turn.player%s.numPlayers)+1))\n engine.registerOption('play', playCard)\n\n def war(self,engine):\n engine.ui.status('War!')\n engine.setPhase('next')\n\n def next(self,engine):\n engine.ui.status('Next!')\n engine.setPhase('play')\n\n def lose(self,engine):\n engine.ui.status('Player has lost')\n self.ended = True\n\n def win(self,engine):\n engine.ui.status('Player has won')\n self.ended = True\n","sub_path":"gcge/games/war.py","file_name":"war.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"314673134","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n__all__ = [\n \"ShuffleNetV2\",\n \"shufflenetv2025\",\n \"shufflenetv205\",\n \"shufflenetv210\",\n \"shufflenetv215\",\n \"shufflenetv220\",\n]\n\n\nclass ShuffleBlock(nn.Module):\n def __init__(self, groups):\n super(ShuffleBlock, self).__init__()\n self.groups = groups\n\n def forward(self, x):\n \"\"\"Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W]\"\"\"\n N, C, H, W = x.size()\n g = self.groups\n return x.view(N, g, 
int(C / g), H, W).permute(0, 2, 1, 3, 4).contiguous().view(N, C, H, W)\n\n\nclass Bottleneck(nn.Module):\n def __init__(self, in_planes, out_planes, stride):\n super(Bottleneck, self).__init__()\n self.stride = stride\n\n mid_planes = out_planes // 2\n if stride == 1:\n self.main = nn.Sequential(\n nn.Conv2d(mid_planes, mid_planes, kernel_size=1, bias=False),\n nn.BatchNorm2d(mid_planes),\n nn.ReLU(),\n nn.Conv2d(\n mid_planes,\n mid_planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n groups=mid_planes,\n bias=False,\n ),\n nn.BatchNorm2d(mid_planes),\n nn.Conv2d(mid_planes, mid_planes, kernel_size=1, bias=False),\n nn.BatchNorm2d(mid_planes),\n nn.ReLU(),\n )\n else:\n self.main = nn.Sequential(\n nn.Conv2d(in_planes, mid_planes, kernel_size=1, bias=False),\n nn.BatchNorm2d(mid_planes),\n nn.ReLU(),\n nn.Conv2d(\n mid_planes,\n mid_planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n groups=mid_planes,\n bias=False,\n ),\n nn.BatchNorm2d(mid_planes),\n nn.Conv2d(mid_planes, mid_planes, kernel_size=1, bias=False),\n nn.BatchNorm2d(mid_planes),\n nn.ReLU(),\n )\n self.shuffle1 = ShuffleBlock(2)\n\n self.shortcut = nn.Sequential()\n if stride == 2:\n self.shortcut = nn.Sequential(\n nn.Conv2d(\n in_planes,\n in_planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n groups=in_planes,\n bias=False,\n ),\n nn.BatchNorm2d(in_planes),\n nn.Conv2d(in_planes, mid_planes, kernel_size=1, bias=False),\n nn.BatchNorm2d(mid_planes),\n nn.ReLU(),\n )\n\n def forward(self, x):\n if self.stride == 1:\n x1 = x[:, : (x.shape[1] // 2), :, :]\n x2 = x[:, (x.shape[1] // 2) :, :, :]\n x = torch.cat((x1, self.main(x2)), 1)\n else:\n x = torch.cat((self.shortcut(x), self.main(x)), 1)\n return self.shuffle1(x)\n\n\nclass ShuffleNetV2(nn.Module):\n def __init__(self, cfg, num_classes=10):\n super(ShuffleNetV2, self).__init__()\n out_planes = cfg[\"out_planes\"]\n num_blocks = cfg[\"num_blocks\"]\n\n self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(24)\n self.in_planes = 24\n self.layer1 = self._make_layers(out_planes[0], num_blocks[0])\n self.layer2 = self._make_layers(out_planes[1], num_blocks[1])\n self.layer3 = self._make_layers(out_planes[2], num_blocks[2])\n self.conv5 = nn.Conv2d(out_planes[2], 1024, kernel_size=1)\n self.linear = nn.Linear(1024, num_classes)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.conv5(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n\n def _make_layers(self, out_planes, num_blocks):\n layers = []\n for i in range(num_blocks):\n stride = 2 if i == 0 else 1\n # cat_planes = self.in_planes if i == 0 else 0\n cat_planes = 0\n layers.append(Bottleneck(self.in_planes, out_planes - cat_planes, stride=stride))\n self.in_planes = out_planes\n return nn.Sequential(*layers)\n\n\ndef shufflenetv2025(**kwargs):\n cfg = {\"out_planes\": [24, 48, 96, 512], \"num_blocks\": [4, 8, 4]}\n return ShuffleNetV2(cfg, **kwargs)\n\n\ndef shufflenetv205(**kwargs):\n cfg = {\"out_planes\": [48, 96, 192, 1024], \"num_blocks\": [4, 8, 4]}\n return ShuffleNetV2(cfg, **kwargs)\n\n\ndef shufflenetv210(**kwargs):\n cfg = {\"out_planes\": [116, 232, 464, 1024], \"num_blocks\": [4, 8, 4]}\n return ShuffleNetV2(cfg, **kwargs)\n\n\ndef shufflenetv215(**kwargs):\n cfg = {\"out_planes\": [176, 352, 704, 1024], \"num_blocks\": [4, 8, 4]}\n return ShuffleNetV2(cfg, **kwargs)\n\n\ndef shufflenetv220(**kwargs):\n cfg 
= {\"out_planes\": [244, 488, 976, 2048], \"num_blocks\": [4, 8, 4]}\n return ShuffleNetV2(cfg, **kwargs)\n","sub_path":"pytorchcv/model_zoo/cifar/shufflenetv2.py","file_name":"shufflenetv2.py","file_ext":"py","file_size_in_byte":5256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"615643093","text":"import discord\nimport asyncio\n\nasync def bot_help(bot, command_prefix, sub_command, message):\n\t\n\tif message.content.strip() == \";help\":\n\t\t\n\t\thelp = \"\"\"{}\nHere is a list of my commands:\n\n```\n;help | loads the help module\n |\n;test | prints a simple test message\n |\n;event | schedule and view team \n | events\n |\n;ping | create a custom message to\n | display when you are pinged\n```\nType ```;help``` + the name of a command to learn more about it.\n\"\"\".format(message.author.mention)\n\n\t\tawait bot.send_message(message.channel, help)\n\n\tif message.content.startswith(\";help help\"):\n\t\t\n\t\tawait bot.send_message(message.channel, \"{} *Really?* :unamused:\".format(message.author.mention))\n","sub_path":"bot_help.py","file_name":"bot_help.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"604813445","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef iteration_layer(x, num_iter, act_func):\n x_flat = x.contiguous().view(x.shape[0], -1)\n for i in range(num_iter):\n act_energy = torch.tanh(act_func(x_flat))\n x_flat = torch.cat((x_flat[:, :-act_energy.shape[1]], act_energy), 1)\n x_out = x_flat.view(x.shape[0], 8, -1)\n return x_out\n\n\nclass Net(nn.Module):\n\n def __init__(self, n_pts, depth, iter_vec):\n self.n_pts = n_pts\n self.depth = depth\n self.iter_vec = iter_vec\n super(Net, self).__init__()\n self.fc_init = nn.Linear(7 * (self.n_pts + 1), self.n_pts + 1)\n #self.pad = nn.ReplicationPad1d(1)\n #self.pool = nn.AvgPool1d(kernel_size=3, stride=2, padding=False)\n self.tconv = nn.ConvTranspose1d(8, 1, 3, stride=2, padding=1, output_padding=0)\n self.fc_fin = nn.Linear(8*(self.n_pts+1), self.n_pts + 1)\n # Create list of fully connected layers\n self.fc_iter = nn.ModuleList([nn.Linear(8 * int(self.n_pts / (2 ** i) + 1), int(self.n_pts / (2 ** i) + 1))\n for i in range(self.depth)])\n\n def forward(self, x):\n bound_0 = x[:, 5, 0].view(x.shape[0], 1, -1)\n bound_1 = x[:, 6, 0].view(x.shape[0], 1, -1)\n batch_sz = x.shape[0]\n x_flat = x.contiguous().view(batch_sz, -1)\n out = F.relu(self.fc_init(x_flat))\n out = out.view(batch_sz, 1, -1)\n x_out = torch.cat((x, out), 1)\n #x_out = torch.cat((x[:, :5, :], out), 1)\n\n # Loop over the depth\n x_net = [None] * self.depth\n x_net[0] = x_out\n for i in range(self.depth):\n if i != 0:\n #x_net[i] = self.pool(self.pad(x_net[i - 1]))\n x_net[i] = x_net[i-1][:, :, ::2]\n x_net[i] = iteration_layer(x_net[i], self.iter_vec[i], self.fc_iter[i])\n\n # Scale back up\n for i in reversed(range(1, self.depth)):\n res = F.relu(self.tconv(x_net[i]))\n x_net[i - 1] = torch.cat((x_net[i - 1][:, :-1, :], (x_net[i - 1][:, -1, :]\n + res.view(batch_sz, -1)).view(batch_sz, 1, -1)), 1)\n x_net[i - 1] = iteration_layer(x_net[i-1], self.iter_vec[i-1], self.fc_iter[i-1])\n\n #output = self.conv(x_net[0])\n x_flat = x_net[0].contiguous().view(x_net[0].shape[0], -1)\n output = self.fc_fin(x_flat).view(x_flat.shape[0], 1, -1)\n\n output[:, :, 0] = bound_0.view(-1, 1)\n output[:, :, -1] = bound_1.view(-1, 1)\n return 
output\n","sub_path":"j_net.py","file_name":"j_net.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"291859589","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom MultLinReg.utilsLinReg import repair_target, repair_rcdata, repair_numdata, repair_chrdata\nfrom MultLinReg.utilsLinReg import regression_results\nimport statsmodels.api as sm\n\n### LOAD DATA ###\ncountry = 'CHN'\nprint('-'*20); print('LOAD DATA'); print('-'*20)\ndataFrm = pd.read_csv('owid-covid-data.csv', sep=',')\ndataF = dataFrm[dataFrm['iso_code'].isin([country])].copy()\nprint(dataF.head())\nprint(dataF.tail())\nprint('Original data columns: ', list(dataF.columns.values))\nprint('Original data records: ', len(dataF.index))\n\n### CHECKING MISSING DATA ###\nprint('-'*20); print('CHECKING MISSING DATA'); print('-'*20)\nprint('#'*3); print(f'Before: \\n{dataF.isnull().sum()}'); print('#'*3);\n\n# CHECK 1: drop columns because of no data\n# (i.e. number of nulls in a particular column = total dataframe records)\ndropList = ['new_tests', 'total_tests', 'total_tests_per_thousand', 'new_tests_per_thousand', \\\n 'new_tests_smoothed', 'new_tests_smoothed_per_thousand', 'tests_per_case', \\\n 'positive_rate', 'tests_units', 'handwashing_facilities']\ndataF = dataF.drop(columns=dropList)\nprint(f'CHECK 1 (Drop No Data Columns): {dropList}')\n\n# CHECK 2: drop columns because of same value through the column or insignificant columns\ndropList = ['hospital_beds_per_thousand', 'life_expectancy']\ndataF = dataF.drop(columns=dropList)\nprint(f'CHECK 2 (Drop Insignificant Columns): {dropList}')\n\n# CHECK 3: educated guess to repair data\n# For example:\n# repair = dataF[dataF['new_cases_smoothed'].isnull()]\n# print(repair[['new_cases', 'new_cases_smoothed']])\n# 'new_cases' = 0,15,17,27 for 'new_cases_smoothed' = NULL\n\nprint(f'CHECK 3 (Repair Data): .....')\n\n# Method 1: repair dataframe column contained NaN with mean value of related dataframe column\ntarget = repair_target(df=dataF, tCol='new_cases_smoothed', rCol='new_cases')\n# target = [0.0, 15.0, 17.0, 27.0]\nfor i in range(len(target)):\n dataF, meanVal, count = repair_rcdata(df=dataF, tCol='new_cases_smoothed', rCol='new_cases', target=target[i])\n\ntarget = repair_target(df=dataF, tCol='new_deaths_smoothed', rCol='new_deaths')\n# target = [0.0]\nfor i in range(len(target)):\n dataF, meanVal, count = repair_rcdata(df=dataF, tCol='new_deaths_smoothed', rCol='new_deaths', target=target[i])\n\ntarget = repair_target(df=dataF, tCol='new_cases_smoothed_per_million', rCol='new_cases_per_million')\n# target = [0.0, 0.01, 0.012, 0.019]\nfor i in range(len(target)):\n dataF, meanVal, count = repair_rcdata(df=dataF, tCol='new_cases_smoothed_per_million', \\\n rCol='new_cases_per_million', target=target[i])\n\ntarget = repair_target(df=dataF, tCol='new_deaths_smoothed_per_million', rCol='new_deaths_per_million')\n# target = [0.0]\nfor i in range(len(target)):\n dataF, meanVal, count = repair_rcdata(df=dataF, tCol='new_deaths_smoothed_per_million', \\\n rCol='new_deaths_per_million', target=target[i])\n\n# Method 2: repair NaN with median value in a particular column\ndataF, medianVal, count = repair_numdata(dataF, 'stringency_index')\n\n# Method 3: repair NaN with the most frequent word in a particular column\n# dataF, 
word, count = repair_chrdata(dataF, 'continent')\n\nprint('#'*3); print(f'After: \\n{dataF.isnull().sum()} '); print('#'*3)\nprint('Cleaned data columns: ', list(dataF.columns.values))\nprint('Cleaned data records: ', len(dataF.index))\n# save the repaired dataframe to a CSV file\ntimeline = dataF['date'].copy()\ndataF.to_csv('covid19_CN.csv', index=False)\n\n### PREPROCESS DATA ###\nprint('-'*20); print('PREPROCESS DATA'); print('-'*20)\n# Categorical boolean mask\ncategorical_feature_mask = dataF.dtypes == object\n# print(categorical_feature_mask)\n# filter categorical columns using mask and turn it into a list\ncategorical_cols = dataF.columns[categorical_feature_mask].tolist()\nprint(f'Categorical columns: {categorical_cols}')\n# instantiate label encoder object\nle = LabelEncoder()\n# apply le on categorical feature columns\ndataF[categorical_cols] = dataF[categorical_cols].apply(lambda col: le.fit_transform(col))\nprint(dataF[categorical_cols].head())\n# print(dataF[categorical_cols].tail())\n\n### ANALYSIS MULTIPLE REGRESSION MODEL FITTING ###\n# Linear Regression: y = mX + c\n# Multiple Regression: y = aX1 + bX2 + cX3 + ... + n\n# Polynomial Regression: y = aX^1 + bX^2 + cX^3 + ....\nprint('-'*20); print('ANALYSIS MULTIPLE REGRESSION MODEL'); print('-'*20)\n# \"Stringency Index\" a composite measure based on nine response indicators including\n# school closures, workplace closures, and travel bans, rescaled to a value from 0 to 100 (100 = strictest)\nX = np.array(dataF[['new_cases', 'new_deaths', 'stringency_index']]).reshape(-1,3)\n# X = np.array(dataF[['new_cases', 'stringency_index']]).reshape(-1,2)\ny = np.array(dataF['total_cases']).reshape(-1,1)\nmodel = linear_model.LinearRegression()\nmodel.fit(X, y)\nprint(f'Accuracy: {round(model.score(X, y)*100, 3)} %')\nprint(f'Intercept: {model.intercept_} ')\nprint(f'Coefficients: {model.coef_} ')\nprint('\\n')\n# statsmodels for multiple regression model fitting\n# X = sm.add_constant(dataF[['new_cases', 'new_deaths', 'stringency_index']])\n# mregr = sm.OLS(y, X).fit()\n# print(mregr.summary())\n\n### TRAINING ###\ntestRatio = 0.2\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=testRatio)\nprint('-'*20); print('TRAINING MODEL'); print('-'*20)\nmodel = linear_model.LinearRegression()\nmodel.fit(X_train, y_train)\nprint(f'Model Coefficient: {model.coef_}')\nprint(f'Model Intercept: {model.intercept_}')\naccuracy = model.score(X_train, y_train)\n# print('Accuracy: %.3f %%' % (accuracy*100))\nprint(f'Accuracy: {round((accuracy*100),3)} % \\n')\n# check the regression statistics\ny_pred = model.predict(X_test)\nregression_results(y_test, y_pred)\n\n### PREDICTION ###\nprint('-'*20); print('PREDICTION PLOT'); print('-'*20)\n### Prepare a timeline for plotting\ntimeln = pd.to_datetime(timeline)\nday = timeln.dt.day.astype(str)\nmth = timeln.dt.month_name().str.slice(stop=3)\nyrs = timeln.dt.year.astype(str).str.slice(start=2)\nxt = mth + yrs # get the label of month+year array\n# Find all the indexes of timeline for Day 1\nxtk = [] # store the indexes for x-axis label location\nxtkLabel = [] # store corresponding labels of month and year\nxtkTarget = ['1', '2', '3', '4', '5']\nxtkFlag = True\nfor i in range(0, len(timeln)):\n if day.array[i] in xtkTarget: # match Day 1/2/3/4/5 of the month for x-axis label location\n if xtkFlag:\n xtk.append(i)\n xtkLabel.append(xt.array[i])\n xtkFlag = False\n else:\n xtkFlag = True\n# print(xtk)\n# print(xtkLabel)\n\nplt.figure(1, figsize=(8,5))\nx = np.array(range(0, 
len(timeln))).reshape(-1,1)\ny0 = np.array(dataF['total_cases']).reshape(-1,1)\ny1 = model.predict(X)\nplt.plot(x, y0)\nplt.scatter(x, y0, s=15, c='red', marker='o', label='actual')\nplt.scatter(x, y1, s=15, c='green', marker='x', label='predict')\nplt.xticks(ticks=xtk, labels=xtkLabel, fontsize=7)\nplt.yticks(np.arange(-10000, max(y0), 10000))\nplt.xlabel('Date')\nplt.ylabel('cases')\nplt.grid()\nplt.legend()\nplt.title('Coronavirus Trend - China')\nplt.show()","sub_path":"MultLinReg/multireg.py","file_name":"multireg.py","file_ext":"py","file_size_in_byte":7337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"431739620","text":"import os\nimport re\n\nimport irc.plugins\n\n\nclass Plug(irc.plugins.PluginTemplate):\n def __init__(self):\n super(Plug, self).__init__()\n self.command = \"memory\"\n self.helptext = \"returns memory usage of bot\"\n\n def call(self, msg, con):\n nick, channel, params = irc.util.parseprivmsg(msg, con.nick)\n\n con.privmsg(channel, memory())\n\n\ndef memory():\n \"\"\".memory - Displays the bot's current memory usage.\"\"\"\n if os.name == \"posix\":\n # get process info\n sfile = open('/proc/self/status')\n status_file = sfile.read()\n s = dict(re.findall(r'^(\\w+):\\s*(.*)\\s*$', status_file, re.M))\n # get the data we need and process it\n data = s['VmRSS'], s['VmSize'], s['VmPeak'], s['VmStk'], s['VmData']\n data = [float(i.replace(' kB', '')) for i in data]\n strings = [convert_kilobytes(i) for i in data]\n # prepare the output\n out = \"Threads: \\x02{}\\x02, Real Memory: \\x02{}\\x02, Allocated Memory: \\x02{}\\x02, Peak \" \\\n \"Allocated Memory: \\x02{}\\x02, Stack Size: \\x02{}\\x02, Heap \" \\\n \"Size: \\x02{}\\x02\".format(s['Threads'], strings[0], strings[1], strings[2],\n strings[3], strings[4])\n # return output\n sfile.close()\n return out\n\n elif os.name == \"nt\":\n cmd = 'tasklist /FI \"PID eq %s\" /FO CSV /NH' % os.getpid()\n out = os.popen(cmd).read()\n memory = 0\n for amount in re.findall(r'([,0-9]+) K', out):\n memory += float(amount.replace(',', ''))\n memory = convert_kilobytes(memory)\n return \"Memory Usage: \\x02{}\\x02\".format(memory)\n\n else:\n return \"Sorry, this command is not supported on your OS.\"\n\n\ndef convert_kilobytes(kilobytes):\n if kilobytes >= 1024:\n megabytes = kilobytes / 1024\n size = '%.2f MB' % megabytes\n else:\n size = '%.2f KB' % kilobytes\n return size","sub_path":"plugins/memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"522179771","text":"#! 
/usr/bin/env python3\n\nimport csv\nimport os\nfrom random import *\n\ninputFile = \"movie_metadata.csv\"\n\nnumFiles = int(input(\"Number of files to create: \"))\nnumRows = int(input(\"Number of rows between 1 and 5043: \"))\n\nos.mkdir(str(numFiles))\npath = \"./\" + str(numFiles) + \"/\"\n\nreader = csv.reader(open(inputFile, 'r'))\nreader = list(reader)\n\n#write all csv files\nfor i in range(numFiles):\n writer = csv.writer(open(path +'metadata_{}.csv'.format(i), 'w'))\n writer.writerow(reader[0]) #category row\n for i in range(numRows):\n writer.writerow(reader[randint(1,5043)])\n","sub_path":"part1/testgen.py","file_name":"testgen.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"636945312","text":"#File I/O Test Question 2\n#Jihal Patel\n#765697\n#ICS4U0-A\n#25 May 2018\n\ninputFile = open(\"input.txt\", \"r\") #Opens input file to read\ninputContents = inputFile.readlines() #Reads every line in the input file\ninputFile.close() #Closes opened input file\ndistances = inputContents[0].split() #Splits all the distances between cities\noutput = [[], [], [], [], []] #Creates a 2d list to store all the distance values for the output\n\nfor i in range(len(distances)+1): #Goes through all the distances and a additional value for the 5th city\n #If i is 0 meaning the distances to calculate are from city 1 then calculate and append appropriate distances to the variable output\n if i == 0: \n output[i].append(0)\n d1 = int(distances[i])\n output[i].append(d1)\n d2 = d1+int(distances[i+1])\n output[i].append(d2)\n d3 = d2+int(distances[i+2])\n output[i].append(d3)\n d4 = d3+int(distances[i+3])\n output[i].append(d4)\n #If i is 1 meaning the distances to calculate are from city 2 then calculate and append appropriate distances to the variable output \n if i == 1:\n d1 = int(distances[i-1])\n output[i].append(d1)\n output[i].append(0)\n d2 = int(distances[i])\n output[i].append(d2)\n d3 = d2+int(distances[i+1])\n output[i].append(d3)\n d4 = d3+int(distances[i+2])\n output[i].append(d4)\n #If i is 2 meaning the distances to calculate are from city 3 then calculate and append appropriate distances to the variable output\n if i == 2:\n d1 = int(distances[i-1])+int(distances[i-2])\n output[i].append(d1)\n d2 = int(distances[i-1])\n output[i].append(d2)\n output[i].append(0)\n d3 = int(distances[i])\n output[i].append(d3)\n d4 = d3+int(distances[i+1])\n output[i].append(d4)\n #If i is 3 meaning the distances to calculate are from city 4 then calculate and append appropriate distances to the variable output\n if i == 3:\n d1 = int(distances[i-1])+int(distances[i-2])+int(distances[i-3])\n output[i].append(d1)\n d2 = int(distances[i-1])+int(distances[i-2])\n output[i].append(d2)\n d3 = int(distances[i-1])\n output[i].append(d3)\n output[i].append(0)\n d4 = int(distances[i])\n output[i].append(d4)\n #If i is 4 meaning the distances to calculate are from city 5 then calculate and append appropriate distances to the variable output\n if i == 4:\n d1 = int(distances[i-1])+int(distances[i-2])+int(distances[i-3])+int(distances[i-4])\n output[i].append(d1)\n d2 = int(distances[i-1])+int(distances[i-2])+int(distances[i-3])\n output[i].append(d2)\n d3 = int(distances[i-1])+int(distances[i-2])\n output[i].append(d3)\n d4 = int(distances[i-1])\n output[i].append(d4)\n output[i].append(0)\n \noutputFile = open('output.txt', 'w') #Opens output file to write in \nfor i in range(len(output)): #Goes through all the 5 
city distances\n    print(output[i][0], output[i][1], output[i][2], output[i][3], output[i][4]) #Prints output to screen as required in the question\n    for j in range(len(output[i])): #Goes through all the distances at the ith city\n        outputFile.write(str(output[i][j])+' ') #Writes the output to the output file\n    outputFile.write('\\n') #Writes a new line character to the file in order to print the next city distances on a new line\noutputFile.close() #Closes output file\n\n","sub_path":"Python/File Reading/File Test 2.py","file_name":"File Test 2.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"223619024","text":"import math\nimport timeit\n\ndef primes_im(n):\n    res = []\n    for i in range(2,n):\n        rge = math.floor(math.sqrt(i))\n        if i == 2 or i == 3:\n            res.append(i)\n        else:\n            for j in range(2,rge+1):\n                if i%j == 0:\n                    break\n                elif j == rge:\n                    res.append(i)\n    return res\n\ndef primes_lc(n):\n    # test divisors up to sqrt(x) for each candidate x (not sqrt(n), which wrongly drops primes such as 5 when n >= 25)\n    return [x for x in range(2, n) if x == 2 or x == 3 or all(x % y != 0 for y in range(2, math.floor(math.sqrt(x))+1))]\n\ndef is_prime_fun(n):\n    return len(list(filter(lambda k: n%k == 0 and n != 2, range(2,math.floor(math.sqrt(n))+1)))) == 0\n\ndef primes_fun(n):\n    return list(filter(is_prime_fun,range(2,n)))\n\nclass PrimesIter:\n    def __init__(self, up_to):\n        self.current = 2\n        self.up_to = up_to\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        while True:\n            if self.current > self.up_to:\n                raise StopIteration\n            is_prime = True\n            # the divisor range must include floor(sqrt(current)) itself, otherwise squares like 4 and 9 are reported as prime\n            for x in range(2, math.floor(math.sqrt(self.current))+1):\n                if self.current % x == 0:\n                    is_prime = False\n                    self.current += 1\n                    break\n            if is_prime:\n                ret = self.current\n                self.current += 1\n                return ret\n\ntimeit_setup = '''from zadanie1 import (\n    PrimesIter\n    )'''\nfor i in range(0, 1000, 100):\n    print(timeit.timeit(lambda: primes_im(i), number=50))\n    print(timeit.timeit(lambda: primes_lc(i), number=50))\n    print(timeit.timeit(lambda: primes_fun(i), number=50))\n    print(timeit.timeit(f'for n in PrimesIter({i}): n', setup=timeit_setup, number=50))\n    print('-'*10)","sub_path":"zima2019/RozszerzonyPython/Lista5/zadanie1.py","file_name":"zadanie1.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"511651180","text":"#------------------------------------------------------------------------------------------#\n# This file is part of Pyccel which is released under MIT License. See the LICENSE file or #\n# go to https://github.com/pyccel/pyccel/blob/master/LICENSE for full license details. 
#\n#------------------------------------------------------------------------------------------#\n\n\"\"\"\nModule representing objects (functions/variables etc) required for the interface\nbetween python code and C code (using Python/C Api and cwrapper.c).\n\"\"\"\n\nfrom ..errors.errors import Errors\nfrom ..errors.messages import PYCCEL_RESTRICTION_TODO\n\nfrom .basic import Basic\n\nfrom .datatypes import DataType\nfrom .datatypes import NativeInteger, NativeReal, NativeComplex\nfrom .datatypes import NativeBool, NativeString, NativeGeneric\n\nfrom .core import FunctionDefArgument\nfrom .core import FunctionCall, FunctionDef, FunctionAddress\n\nfrom .variable import Variable\n\n\nerrors = Errors()\n\n__all__ = (\n#\n# --------- CLASSES -----------\n#\n 'PyccelPyObject',\n 'PyccelPyArrayObject',\n 'PyArgKeywords',\n 'PyArg_ParseTupleNode',\n 'PyBuildValueNode',\n#--------- CONSTANTS ----------\n 'Py_True',\n 'Py_False',\n 'Py_None',\n 'flags_registry',\n#----- C / PYTHON FUNCTIONS ---\n 'Py_DECREF',\n 'PyErr_SetString',\n#----- CHECK FUNCTIONS ---\n 'generate_datatype_error',\n 'scalar_object_check',\n)\n\n#-------------------------------------------------------------------\n# Python DataTypes\n#-------------------------------------------------------------------\nclass PyccelPyObject(DataType):\n \"\"\" Datatype representing a PyObject which is the\n class used to hold python objects\"\"\"\n __slots__ = ()\n _name = 'pyobject'\n\nclass PyccelPyArrayObject(DataType):\n \"\"\" Datatype representing a PyArrayObject which is the\n class used to hold numpy objects\"\"\"\n __slots__ = ()\n _name = 'pyarrayobject'\n\nPyArray_Type = Variable(NativeGeneric(), 'PyArray_Type')\n\n#-------------------------------------------------------------------\n# Parsing and Building Classes\n#-------------------------------------------------------------------\n\n#TODO: Is there an equivalent to static so this can be a static list of strings?\nclass PyArgKeywords(Basic):\n \"\"\"\n Represents the list containing the names of all arguments to a function.\n This information allows the function to be called by keyword\n\n Parameters\n ----------\n name : str\n The name of the variable in which the list is stored\n arg_names : list of str\n A list of the names of the function arguments\n \"\"\"\n __slots__ = ('_name','_arg_names')\n _attribute_nodes = ()\n def __init__(self, name, arg_names):\n self._name = name\n self._arg_names = arg_names\n super().__init__()\n\n @property\n def name(self):\n \"\"\" The name of the variable in which the list of\n all arguments to the function is stored\n \"\"\"\n return self._name\n\n @property\n def arg_names(self):\n \"\"\" The names of the arguments to the function which are\n contained in the PyArgKeywords list\n \"\"\"\n return self._arg_names\n\n#using the documentation of PyArg_ParseTuple() and Py_BuildValue https://docs.python.org/3/c-api/arg.html\npytype_parse_registry = {\n (NativeInteger(), 4) : 'i',\n (NativeInteger(), 8) : 'l',\n (NativeInteger(), 2) : 'h',\n (NativeInteger(), 1) : 'b',\n (NativeReal(), 8) : 'd',\n (NativeReal(), 4) : 'f',\n (NativeComplex(), 4) : 'O',\n (NativeComplex(), 8) : 'O',\n (NativeBool(), 4) : 'p',\n (NativeString(), 0) : 's',\n (PyccelPyObject(), 0) : 'O',\n (PyccelPyArrayObject(), 0) : 'O!',\n }\n\nclass PyArg_ParseTupleNode(Basic):\n \"\"\"\n Represents a call to the function from Python.h which collects the expected arguments\n\n Parameters\n ----------\n python_func_args: Variable\n Args provided to the function in python\n 
python_func_kwargs: Variable\n Kwargs provided to the function in python\n c_func_args: list of Variable\n List of expected arguments. This helps determine the expected output types\n parse_args: list of Variable\n List of arguments into which the result will be collected\n arg_names : list of str\n A list of the names of the function arguments\n is_interface : boolean\n Default value False and True when working with interface functions\n \"\"\"\n __slots__ = ('_pyarg','_pykwarg','_parse_args','_arg_names','_flags')\n _attribute_nodes = ('_pyarg','_pykwarg','_parse_args','_arg_names')\n\n def __init__(self, python_func_args,\n python_func_kwargs,\n c_func_args, parse_args,\n arg_names):\n if not isinstance(python_func_args, Variable):\n raise TypeError('Python func args should be a Variable')\n if not isinstance(python_func_kwargs, Variable):\n raise TypeError('Python func kwargs should be a Variable')\n if not all(isinstance(c, FunctionDefArgument) for c in c_func_args):\n raise TypeError('C func args should be a list of Arguments')\n if not isinstance(parse_args, list) and any(not isinstance(c, Variable) for c in parse_args):\n raise TypeError('Parse args should be a list of Variables')\n if not isinstance(arg_names, PyArgKeywords):\n raise TypeError('Parse args should be a list of Variables')\n if len(parse_args) != len(c_func_args):\n raise TypeError('There should be the same number of c_func_args and parse_args')\n\n self._flags = ''\n i = 0\n\n while i < len(c_func_args) and not c_func_args[i].has_default:\n self._flags += self.get_pytype(c_func_args[i], parse_args[i])\n i+=1\n if i < len(c_func_args):\n self._flags += '|'\n while i < len(c_func_args):\n self._flags += self.get_pytype(c_func_args[i], parse_args[i])\n i+=1\n # Restriction as of python 3.8\n if any([isinstance(a, (Variable, FunctionAddress)) and a.is_kwonly for a in c_func_args]):\n errors.report('Kwarg only arguments without default values will not raise an error if they are not passed',\n symbol=c_func_args, severity='warning')\n\n parse_args = [[PyArray_Type, a] if isinstance(a, Variable) and a.dtype is PyccelPyArrayObject()\n else [a] for a in parse_args]\n parse_args = [a for arg in parse_args for a in arg]\n\n self._pyarg = python_func_args\n self._pykwarg = python_func_kwargs\n self._parse_args = parse_args\n self._arg_names = arg_names\n super().__init__()\n\n def get_pytype(self, c_arg, parse_arg):\n \"\"\"Return the needed flag to parse or build value\n \"\"\"\n if isinstance(c_arg, FunctionAddress):\n return 'O'\n else:\n try:\n return pytype_parse_registry[(parse_arg.dtype, parse_arg.precision)]\n except KeyError as e:\n raise NotImplementedError(\"Type not implemented for argument collection : \"+str(type(parse_arg))) from e\n\n @property\n def pyarg(self):\n \"\"\" The variable containing all positional arguments\n passed to the function\n \"\"\"\n return self._pyarg\n\n @property\n def pykwarg(self):\n \"\"\" The variable containing all keyword arguments\n passed to the function\n \"\"\"\n return self._pykwarg\n\n @property\n def flags(self):\n \"\"\" The flags indicating the types of the objects to\n be collected from the python arguments passed to the\n function\n \"\"\"\n return self._flags\n\n @property\n def args(self):\n \"\"\" The arguments into which the python args and kwargs\n are collected\n \"\"\"\n return self._parse_args\n\n @property\n def arg_names(self):\n \"\"\" The PyArgKeywords object which contains all the\n names of the function's arguments\n \"\"\"\n return self._arg_names\n\nclass 
PyBuildValueNode(Basic):\n \"\"\"\n Represents a call to the function from Python.h which create a new value based on a format string\n\n Parameters\n ---------\n parse_args: list of Variable\n List of arguments which the result will be buit from\n \"\"\"\n __slots__ = ('_flags','_result_args',)\n _attribute_nodes = ('_result_args',)\n\n def __init__(self, result_args = ()):\n self._flags = ''\n self._result_args = result_args\n for i in result_args:\n self._flags += pytype_parse_registry[(i.dtype, i.precision)]\n super().__init__()\n\n @property\n def flags(self):\n return self._flags\n\n @property\n def args(self):\n return self._result_args\n\n#-------------------------------------------------------------------\n# Python.h Constants\n#-------------------------------------------------------------------\n\n# Python.h object representing Booleans True and False\nPy_True = Variable(PyccelPyObject(), 'Py_True',is_pointer=True)\nPy_False = Variable(PyccelPyObject(), 'Py_False',is_pointer=True)\n\n# Python.h object representing None\nPy_None = Variable(PyccelPyObject(), 'Py_None', is_pointer=True)\n\n# https://docs.python.org/3/c-api/refcounting.html#c.Py_DECREF\nPy_DECREF = FunctionDef(name = 'Py_DECREF',\n body = [],\n arguments = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)],\n results = [])\n\n#-------------------------------------------------------------------\n# cwrapper.h functions\n#-------------------------------------------------------------------\n\ndef Python_to_C(c_object):\n \"\"\"\n Create FunctionDef responsible for casting python argument to C\n Parameters:\n ----------\n c_object : Variable\n The variable needed for the generation of the cast_function\n Returns\n -------\n FunctionDef : cast type FunctionDef\n \"\"\"\n try :\n cast_function = py_to_c_registry[(c_object.dtype, c_object.precision)]\n except KeyError:\n errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype,severity='fatal')\n cast_func = FunctionDef(name = cast_function,\n body = [],\n arguments = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)],\n results = [Variable(dtype=c_object.dtype, name = 'v', precision = c_object.precision)])\n\n return cast_func\n\n# Functions definitions are defined in pyccel/stdlib/cwrapper/cwrapper.c\npy_to_c_registry = {\n (NativeBool(), 4) : 'PyBool_to_Bool',\n (NativeInteger(), 1) : 'PyInt8_to_Int8',\n (NativeInteger(), 2) : 'PyInt16_to_Int16',\n (NativeInteger(), 4) : 'PyInt32_to_Int32',\n (NativeInteger(), 8) : 'PyInt64_to_Int64',\n (NativeReal(), 4) : 'PyFloat_to_Float',\n (NativeReal(), 8) : 'PyDouble_to_Double',\n (NativeComplex(), 4) : 'PyComplex_to_Complex64',\n (NativeComplex(), 8) : 'PyComplex_to_Complex128'}\n\ndef C_to_Python(c_object):\n \"\"\"\n Create FunctionDef responsible for casting c argument to python\n Parameters:\n ----------\n c_object : Variable\n The variable needed for the generation of the cast_function\n Returns\n -------\n FunctionDef : cast type FunctionDef\n \"\"\"\n try :\n cast_function = c_to_py_registry[(c_object.dtype, c_object.precision)]\n except KeyError:\n errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype,severity='fatal')\n\n cast_func = FunctionDef(name = cast_function,\n body = [],\n arguments = [Variable(dtype=c_object.dtype, name = 'v', precision = c_object.precision)],\n results = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)])\n\n return cast_func\n\n# Functions definitions are defined in pyccel/stdlib/cwrapper/cwrapper.c\n# TODO create cast functions of different 
precision of int issue #735\nc_to_py_registry = {\n (NativeBool(), 4) : 'Bool_to_PyBool',\n (NativeInteger(), 1) : 'Int8_to_PyLong',\n (NativeInteger(), 2) : 'Int16_to_PyLong',\n (NativeInteger(), 4) : 'Int32_to_PyLong',\n (NativeInteger(), 8) : 'Int64_to_PyLong',\n (NativeReal(), 4) : 'Float_to_PyDouble',\n (NativeReal(), 8) : 'Double_to_PyDouble',\n (NativeComplex(), 4) : 'Complex64_to_PyComplex',\n (NativeComplex(), 8) : 'Complex128_to_PyComplex'}\n\n\n#-------------------------------------------------------------------\n# errors and check functions\n#-------------------------------------------------------------------\n\n# https://docs.python.org/3/c-api/exceptions.html#c.PyErr_Occurred\nPyErr_Occurred = FunctionDef(name = 'PyErr_Occurred',\n arguments = [],\n results = [Variable(dtype = PyccelPyObject(), name = 'r', is_pointer = True)],\n body = [])\n\ndef PyErr_SetString(exception, message):\n \"\"\"\n Generate function Call of c/python api PyErr_SetString\n https://docs.python.org/3/c-api/exceptions.html#c.PyErr_SetString\n with a defined error message used to set the error indicator.\n\n Parameters:\n ----------\n exception : str\n The exception type\n message : str\n Error message\n Returns\n FunctionCall : raise error FunctionCall\n \"\"\"\n func = FunctionDef(name = 'PyErr_SetString',\n body = [],\n arguments = [Variable(dtype = PyccelPyObject(), name = 'o'),\n Variable(dtype = NativeString(), name = 's')],\n results = [])\n\n exception = Variable(PyccelPyObject(), name = exception)\n\n return FunctionCall(func, [exception, message])\n\n\ndef generate_datatype_error(variable):\n \"\"\"\n Generate TypeError exception from the variable information (datatype, precision)\n Parameters:\n ----------\n variable : Variable\n\n Returns:\n -------\n func : FunctionCall\n call to PyErr_SetString with TypeError as exception and custom message\n \"\"\"\n dtype = variable.dtype\n\n if isinstance(dtype, NativeBool):\n precision = ''\n if isinstance(dtype, NativeComplex):\n precision = '{} bit '.format(variable.precision * 2 * 8)\n else:\n precision = '{} bit '.format(variable.precision * 8)\n\n message = '\"Argument must be {precision}{dtype}\"'.format(\n precision = precision,\n dtype = variable.dtype)\n return PyErr_SetString('PyExc_TypeError', message)\n\n\n# Functions definitions are defined in pyccel/stdlib/cwrapper/cwrapper.c\ncheck_type_registry = {\n (NativeBool(), 4) : 'PyIs_Bool',\n (NativeInteger(), 1) : 'PyIs_Int8',\n (NativeInteger(), 2) : 'PyIs_Int16',\n (NativeInteger(), 4) : 'PyIs_Int32',\n (NativeInteger(), 8) : 'PyIs_Int64',\n (NativeReal(), 4) : 'PyIs_Float',\n (NativeReal(), 8) : 'PyIs_Double',\n (NativeComplex(), 4) : 'PyIs_Complex64',\n (NativeComplex(), 8) : 'PyIs_Complex128'}\n\ndef scalar_object_check(py_object, c_object):\n \"\"\"\n Create FunctionCall responsible for checking python argument data type\n Parameters:\n ----------\n py_object : Variable\n The python argument of the check function\n c_object : Variable\n The variable needed for the generation of the type check\n precision_check : Boolean\n True if checking the exact precision is needed\n Returns\n -------\n FunctionCall : Check type FunctionCall\n \"\"\"\n\n try :\n check_type = check_type_registry[c_object.dtype, c_object.precision]\n except KeyError:\n errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype,severity='fatal')\n\n check_func = FunctionDef(name = check_type,\n body = [],\n arguments = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)],\n results = 
[Variable(dtype=NativeBool(), name = 'r')])\n\n return FunctionCall(check_func, [py_object])\n\n# This registry is used for interface management,\n# mapping each data type to a given flag\n# Those flags are used in a bitset #TODO\nflags_registry = {\n (NativeInteger(), 4) : 1,\n (NativeInteger(), 8) : 2,\n (NativeInteger(), 2) : 3,\n (NativeInteger(), 1) : 4,\n (NativeReal(), 8) : 5,\n (NativeReal(), 4) : 6,\n (NativeComplex(), 4) : 7,\n (NativeComplex(), 8) : 8,\n (NativeBool(), 4) : 9,\n (NativeString(), 0) : 10\n}\n","sub_path":"pyccel/ast/cwrapper.py","file_name":"cwrapper.py","file_ext":"py","file_size_in_byte":16769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"573458641","text":"import scrapy\nfrom MoviesInfoSpider.items import MoviesinfospiderItem\n\nclass MaoyanSpider(scrapy.Spider):\n name = 'maoyan'\n allowed_domains = ['maoyan.com']\n start_urls = ['https://maoyan.com/films?showType=3']\n\n def parse(self, response):\n # 1.提取当前页面的信息\n # 先分组,后提取\n dd_list = response.xpath('//dd')\n for dd in dd_list:\n item = MoviesinfospiderItem()\n\n item['电影'] = dd.xpath('.//div[@class=\"movie-hover-info\"]/div[1]/span/text()').extract_first().strip()\n item['类型'] = dd.xpath('.//div[@class=\"movie-hover-info\"]/div[2]/text()[2]').extract_first().strip()\n item['上映时间'] = dd.xpath('.//div[@class=\"movie-hover-info\"]/div[4]/text()[2]').extract_first().strip()\n yield item","sub_path":"week02/task1/MoviesInfoSpider/MoviesInfoSpider/spiders/maoyan.py","file_name":"maoyan.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"162758453","text":"# Copyright 2011-2013 GRNET S.A. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGE.\n#\n# The views and conclusions contained in the software and documentation are\n# those of the authors and should not be interpreted as representing official\n# policies, either expressed or implied, of GRNET S.A.\n#\nfrom django.core.management.base import BaseCommand\n\nfrom synnefo.db.models import Backend\nfrom synnefo.logic import backend as backend_mod\n\n\nHELP_MSG = \"\"\"Query Ganeti backends and update the status of backend in DB.\n\nThis command updates:\n * the list of the enabled disk-templates\n * the available resources (disk, memory, CPUs)\n\"\"\"\n\n\nclass Command(BaseCommand):\n help = HELP_MSG\n\n def handle(self, **options):\n for backend in Backend.objects.select_for_update()\\\n .filter(offline=False):\n backend_mod.update_backend_disk_templates(backend)\n backend_mod.update_backend_resources(backend)\n self.stdout.write(\"Successfully updated backend '%s'\\n\" % backend)\n","sub_path":"snf-cyclades-app/synnefo/logic/management/commands/backend-update-status.py","file_name":"backend-update-status.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"126469721","text":"import pandas as pd\nimport numpy as np\nimport yaml\nimport pathlib\nimport os\n\n\nclass DataSeries:\n gno = None\n sno = None\n sname = None\n def __init__(self, df: pd.DataFrame, col_idx, device_name=None):\n if device_name is not None:\n self.device_name = device_name\n else:\n self.device_name = df.columns[col_idx]\n self.occurrence = None\n self.occurrence_node = None\n self.alarm_stats = None\n self.alarm_array = None\n self.lasting_minutes = None\n self.max_mv = None\n self.max_lasting = None\n\n vf = np.vectorize(lambda x: x.timestamp()/60)\n self.dt = vf(df.iloc[2:, col_idx])\n self.voltage = df.iloc[2:, col_idx + 1].to_numpy() # type: np.ndarray\n self.pps = df.iloc[2:, col_idx + 2].to_numpy() # type: np.ndarray\n\n p = pathlib.Path(os.getcwd()).joinpath(\"config.yml\")\n try:\n with open(p, \"r\") as stream:\n data: dict\n data = yaml.load(stream)\n self.ratio_list = data['std_ratio']\n except IOError:\n self.ratio_list = [1, 2, 3]\n self.report_list = dict()\n\n def analyze(self, ratio_factor):\n \"\"\"\n :param ratio_factor: 標準差倍數\n \"\"\"\n threshold = self.get_voltage_threshold(ratio_factor)\n self.alarm_array = np.where(self.voltage >= threshold, 1, 0) # 將高於門檻值標示成 1 否則為 0\n self.occurrence = np.diff(self.alarm_array) # 清理轉換成元素差值 n+1 - n\n self.occurrence_node = self.occurrence.nonzero()[0] # 取得非零元素的索引形成 tuple\n self.clean_series_endpoint() # 移除無用開頭或結尾處於 Alarm 狀態 -> 無從估計持續時間\n self.alarm_stats = np.reshape(self.occurrence_node, (int(self.occurrence_node.size / 2), 2))\n\n # round((ds.dt[alarm[1]].timestamp() - ds.dt[alarm[0]].timestamp()) / 60))\n vf = np.vectorize(lambda x: self.dt[x])\n self.lasting_minutes = vf(self.alarm_stats[:, 1, ]) - vf(self.alarm_stats[:, 0, ])\n return self.alarm_stats\n\n def get_voltage_threshold(self, ratio_factor): # 門檻值必須 >= 5\n threshold = 
self.voltage.mean() + ratio_factor * self.voltage.std()\n\n if threshold <= 5:\n return 5\n\n if self.max_mv is not None and self.max_mv != 0 and threshold >= self.max_mv:\n return self.max_mv\n\n return threshold\n\n def get_max_lasting_minutes(self): # 持續時間必須介於 20 ~ self.max_lasting min\n max_lasting_minute = np.amax(self.lasting_minutes)\n if max_lasting_minute <= 20:\n return 20\n\n if self.max_lasting is not None and self.max_lasting != 0 and max_lasting_minute >= self.max_lasting:\n return self.max_lasting\n\n return max_lasting_minute\n\n def get_occurrence_node_mapping(self):\n return np.fromfunction(lambda i, j: self.occurrence[self.alarm_stats[i, j]], (int(self.occurrence_node.size / 2), 2), dtype=int)\n\n def clean_series_endpoint(self):\n try:\n if self.occurrence[self.occurrence_node[0]] == -1: # 開頭就處於觸發狀態\n self.occurrence_node = np.insert(self.occurrence_node, 0, 0) # 標示數列開頭是節點\n if self.occurrence[self.occurrence_node[-1]] == 1:\n self.occurrence_node = np.append(self.occurrence_node, len(self.occurrence)-1) # 標示數列尾端是節點\n except IndexError:\n raise IndexError(\"無法計算時間閾值!\")\n\n def report(self):\n print(\"\\ngroup no: {} - group name: {}, station no: {}, station name: {}\".format(\n self.gno,\n self.device_name,\n self.sno if self.sno is not None else \" \",\n self.sname if self.sname is not None else \" \",\n ))\n self.report_list[self.device_name] = dict()\n for ratio in self.ratio_list:\n try:\n self.analyze(ratio)\n except IndexError as e:\n print(\"Ratio={} - {}\".format(ratio, str(e)))\n continue\n self.report_list[self.device_name][str(ratio)] = [\n \"{0:.1f}\".format(self.voltage.mean()),\n \"{0:.1f}\".format(self.voltage.std()),\n \"{0:.1f}\".format(self.get_voltage_threshold(ratio)),\n \"{0:.1f}\".format(self.get_max_lasting_minutes()),\n str(np.sum(np.where(self.lasting_minutes >= self.get_max_lasting_minutes(), 1, 0), dtype=np.int32))]\n\n print(\"mean: {0:>6.1f}, std: {1:>6.2f}, threshold: {2:>6.2f}, Tmax: {3:>6.0f}, Alarm #: {4:>4}\".format(\n self.voltage.mean(),\n self.voltage.std(),\n self.get_voltage_threshold(ratio),\n self.get_max_lasting_minutes(),\n np.sum(np.where(self.lasting_minutes >= self.get_max_lasting_minutes(), 1, 0), dtype=np.int32)))\n\n\nclass DataImporter:\n\n @classmethod\n def xls_import(cls, file_name):\n data = pd.read_excel(file_name)\n return data\n","sub_path":"pdstats/data_import.py","file_name":"data_import.py","file_ext":"py","file_size_in_byte":5278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"629370954","text":"from CleanCsvFiles import Cleaning\nfrom DataAnalysis import AnalyseData, Plot\n\nif __name__ == \"__main__\":\n \"\"\"Calling all the needed function\"\"\"\n \n df = Cleaning().import_from_csv()\n df = Cleaning().check_space(df)\n df = Cleaning().delete_duplicate(df)\n df = Cleaning().remplace_NaN_value(df)\n #df = Cleaning().clean_errors(df)\n df = Cleaning().change_HOUSE_to_Maison(df)\n\n AnalyseData().how_many_row_and_columns(df)\n AnalyseData().describe_of_values(df)\n #Plot().plot_home_type_by_quantity(df)\n #Plot().plot_surface_of_type_price(df)\n #Plot().proportions_of_home_type(df)\n #Plot().home_type_price_dispersion(df)\n #Plot().home_type_surface_dispersion(df)\n #Plot().distribution_of_surface(df)\n #Plot().distribution_of_price(df)\n #Plot().plot_price_area(df)\n #Plot().plot_price_room(df)\n #Plot().state_of_building(df)\n #Plot().city_dispersion(df)\n #Plot().mean_price_by_city(df)\n #Plot().most_and_less_expensive_municipality_wallonia(df)\n 
#Plot().median_price_municipality_wall(df)\n #Plot().price_per_square_metre_municipality_wall(df)\n #Plot().price_in_flandre(df)\n #Plot().most_and_less_expensive_municipality_flandre(df)\n #Plot().median_price_municipality_flandre(df)\n Plot().price_per_square_metre_municipality_flandre(df)\n #Plot().belguim_house_price(df)","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"265791942","text":"from typing import Set\n\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom kbgen.load_tensor_tools import load_single_adjacency_matrix, load_types_npz\nfrom .interfaces import LearnProcess, ResultCollector\n\n\nclass MultiTypeLearnProcess(LearnProcess):\n def __init__(self, dense_entity_types: np.ndarray, **kwargs):\n super(MultiTypeLearnProcess, self).__init__(**kwargs)\n self.dense_entity_types = dense_entity_types\n\n def learn_distributions(self, relation_id: int):\n \"\"\"\n For an in-depth explanation take a loot at the single core implementation in the M1-Model itself.\n :param relation_id: the relation id for which the features are learned\n \"\"\"\n adjacency_matrix = load_single_adjacency_matrix(self.input_dir, relation_id)\n\n num_edges = adjacency_matrix.nnz\n subject_ids_row = adjacency_matrix.row\n object_ids_row = adjacency_matrix.col\n\n distinct_multi_types = set()\n\n for index in range(num_edges):\n subject_id = subject_ids_row[index]\n object_id = object_ids_row[index]\n\n multi_type, = self.dense_entity_types[subject_id].nonzero()\n distinct_multi_types.add(frozenset(multi_type))\n\n multi_type, = self.dense_entity_types[object_id].nonzero()\n distinct_multi_types.add(frozenset(multi_type))\n\n self.result_queue.put(distinct_multi_types)\n\n\nclass MultiTypeResultCollector(ResultCollector):\n \"\"\"\n For an in-depth explanation take a loot at the single core implementation in the M1-Model itself.\n \"\"\"\n def __init__(self, input_dir: str):\n self.input_dir = input_dir\n self.distinct_multi_types: Set[frozenset] = set()\n\n entity_types = load_types_npz(self.input_dir)\n self.dense_entity_types = entity_types.toarray()\n\n def handle_result(self, result: Set[frozenset]):\n self.distinct_multi_types = self.distinct_multi_types.union(result)\n\n def build_model(self):\n multi_type_index = {}\n for multi_type in self.distinct_multi_types:\n multi_type_index[multi_type] = len(multi_type_index)\n\n return multi_type_index\n\n","sub_path":"kbgen/kb_models/multiprocessing/multitype_index.py","file_name":"multitype_index.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"125052191","text":"# -*- coding: utf-8 -*-\n\"\"\"Defines fixtures available to all tests.\"\"\"\n\nimport pytest\nfrom webtest import TestApp\n\nfrom app import create_app\nfrom app import db as _db\n\nfrom .factories import PersonFactory, RoleFactory\n\n@pytest.fixture\ndef app():\n app = create_app('default')\n context = app.app_context()\n context.push()\n yield app\n context.pop()\n\n@pytest.fixture\ndef testapp(app):\n \"\"\"A Webtest app.\"\"\"\n return TestApp(app)\n\n\n@pytest.fixture\ndef request_context(app):\n \"\"\"A Request Context (for when request-specific information is needed in scope).\"\"\"\n with app.test_request_context() as ctx:\n yield ctx\n\n\n@pytest.fixture\ndef db(app):\n \"\"\"A database for the tests.\"\"\"\n _db.app = app\n _db.create_all()\n\n yield 
_db\n\n # Explicitly close DB connection\n _db.session.close()\n _db.drop_all()\n\n\n@pytest.fixture\ndef person(db):\n \"\"\"A user for the tests.\"\"\"\n person = PersonFactory()\n db.session.commit()\n return person\n\n@pytest.fixture\ndef blocked_role(db):\n \"\"\"A blocked role for the tests.\"\"\"\n role = RoleFactory(name='blocked')\n db.session.commit()\n\n return role\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"570775386","text":"# coding=utf-8\n\n'''\nScript for testing Scikit and Keras\n'''\n\n\nimport argparse\nimport sys\nfrom importlib import reload\nreload(sys)\n# sys.setdefaultencoding('utf-8')\t#necessary to avoid unicode errors\nimport os\nimport re\nimport numpy as np\nfrom os import listdir\nfrom os.path import isfile, join\nfrom os import walk\nimport pyexcel as p\nimport csv\n\n## Keras\n\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.wrappers.scikit_learn import KerasRegressor\n\n## Sklearn\n\nfrom sklearn import svm\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import datasets\nfrom scipy.stats import pearsonr\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\nfrom tqdm import tqdm\n\ndef create_arg_parser():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"-f1\", required = True, type=str, help=\"Input folder\")\n\targs = parser.parse_args()\n\treturn args\n\n\ndef baseline_model(nodes, input_dim):\n\t# create model\n\tmodel = Sequential()\n\tmodel.add(Dense(nodes, input_dim = input_dim, kernel_initializer='normal', activation='relu'))\n\tmodel.add(Dense(1, kernel_initializer='normal'))\n\t# Compile model\n\tmodel.compile(loss='mean_squared_error', optimizer='adam')\n\treturn model\n\n\ndef train_test_pearson(clf, X_train, y_train, X_test, y_test):\n\t'''Function that does fitting and pearson correlation'''\n\tclf.fit(X_train, y_train)\n\tres = clf.predict(X_test)\n\tprint(\"Pearson coefficient: {0}\\n\".format(pearsonr(res,y_test)[0]))\n\n\treturn pearsonr(res, y_test)[0]\n\n\nif __name__ == \"__main__\":\n\targs = create_arg_parser()\n\n\t# arg f1 is now a folder to get the files from (USE ./features/ OTHERWISE CHANGE lang and emotion below because they won't be split up properly!!!)\n\tfilenames = []\n\tfor (dirpath, dirnames, files) in walk(args.f1):\n\t for name in files:\n\t \tfilenames.append(os.path.join(dirpath, name))\n\texcel_data = []\n\n\tfor file in tqdm(filenames):\n\t\t## load dataset ##\n\t\ttask = \"EI-REG\"\n\t\tlang = file.split(\"\\\\\")[0].split(\"/\")[-1] # do this differently if you do not use -f1 = ./features\n\t\temotion = file.split(\"\\\\\")[1] # do this differently if you do not use -f1 = ./features\n\t\tfeat = file.split(\"\\\\\")[-1][:-4]\n\t\tprint(task, lang, emotion, feat)\n\t\tdataset = np.loadtxt(file, delimiter=\",\", skiprows = 1)\n\n\t\t## split into input (X) and output (Y) variables ##\n\t\tX = dataset[:,0:-1] #select everything but last column (label)\n\t\tY = dataset[:,-1] #select column\n\t\tX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)\n\n\t\tprint(\"PREDICTIONS \", file)\n\t\t## SVM test ##\n\t\tsvm_clf = svm.SVR()\n\t\tprint('Training SVM...\\n')\n\t\tpearson_svm = train_test_pearson(svm_clf, X_train, 
y_train, X_test, y_test)\n\n\t\t## Running baseline neural model ##\n\t\tprint('Training neural baseline...\\n')\n\t\tinput_dim = len(X_train[0]) #input dimension is a necessary argument for the baseline model\n\t\testimator = KerasRegressor(build_fn=baseline_model, nodes = 150, input_dim = input_dim, nb_epoch=100, batch_size=5, verbose=0)\n\t\tpearson_neural = train_test_pearson(estimator, X_train, y_train, X_test, y_test)\n\t\tfound = False\n\t\tfor item in excel_data:\n\t\t\tif task in item.values() and lang in item.values() and emotion in item.values():\n\t\t\t\titem[feat] = (float(\"{0:.2f}\".format(pearson_svm)), float(\"{0:.2f}\".format(pearson_neural)))\n\t\t\t\tfound = True\n\t\tif not found:\n\t\t\texcel_data.append({\"task\": task, \"lang\": lang, \"emotion\": emotion, feat: (float(\"{0:.2f}\".format(pearson_svm)), float(\"{0:.2f}\".format(pearson_neural)))})\n\t\tprint((float(\"{0:.2f}\".format(pearson_svm)), float(\"{0:.2f}\".format(pearson_neural))))\n\n\n\tkeys = [\"task\", \"lang\", \"emotion\", \"ngrams\", \"ngrams-embeddings\", \"ngrams-lexicons\", \"ngrams-lexicons-embeddings\", \"lexicons\", \"lexicons-embeddings\", \"embeddings\"]\n\twith open('results_rounded_with_translated_en_to_es_test.csv', 'w') as output_file:\n\t dict_writer = csv.DictWriter(output_file, fieldnames=keys, delimiter=\";\")\n\t dict_writer.writeheader()\n\t for row in excel_data:\n\t \tdict_writer.writerow(row)\n\n\n\n### TASK LANG EMOTION FEAT 1 FEAT 2 FEAT 3\n","sub_path":"python_scripts/test_keras_scikit.py","file_name":"test_keras_scikit.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"210526830","text":"from sklearn import datasets, linear_model\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom sklearn.metrics import roc_curve, auc\nimport numpy as np\nimport datetime\nimport umap\nfrom .models import KPI, Machine\nimport joblib\n\nimport sqlite3\nimport pandas as pd\n\nfrom .models import Measurements\nfrom .serializers import MeasurementsSerializer\n\n\n\n\n\nclass Calculations:\n\n def __init__(self):\n pass\n \n def CalcUmap(params):\n\n nRowsRead = 1000 \n df1 = pd.read_csv('Control.csv', delimiter=',', nrows=nRowsRead)\n df2 = pd.read_csv('Quality.csv', delimiter='\\t', nrows=nRowsRead)\n df1.dataframeName = 'Control.csv'\n df2.dataframeName = 'Quality.csv'\n df1.drop([col for col in df1.columns if col.startswith('Wickler')],\n axis=1, inplace=True)\n df2 = df2[['Stippe_-3000','Unnamed: 0', 'date']]\n nRow, nCol = df1.shape\n\n print(df1.columns)\n print(f'There are {nRow} rows and {nCol} columns')\n nRow, nCol = df2.shape\n print(f'There are {nRow} rows and {nCol} columns')\n df_quality = df2\n df_control = df1\n df_quality = df_quality.drop(['Unnamed: 0'], axis=1)\n df_control = df_control.drop(['Unnamed: 0'], axis=1)\n arr_control = df_control.drop('date', axis=1)\n arr_quality = df_quality.set_index('date')\n stp_str = 'Stippe_-3000'\n treshold = 47.5\n df_stippe = df_quality[stp_str]\n df_quality[df_quality[stp_str] > treshold][stp_str]\n color = np.where(df_quality[stp_str] > treshold ,'red','black')\n\n df_control = pd.DataFrame(arr_control, columns=df_control.columns, index= df_control.index)\n df_quality = pd.DataFrame(arr_quality, columns=df_quality.columns, index= df_quality.index)\n df_control = df_control.fillna(0)\n df_stippe = df_stippe.fillna(0)\n lasso = linear_model.Lasso(alpha=2, tol=0.2)\n lasso.fit(df_control, df_stippe)\n top_param_lasso = 
pd.DataFrame([df_control.columns, lasso.coef_]).T\n        top_param_lasso.columns = ['param', 'coef']\n        # print(top_param_lasso)\n        top_param_lasso = top_param_lasso[top_param_lasso['coef'] > 0]\n        # top_param_lasso.head()\n\n        df_select_lasso = df_control[top_param_lasso['param']]\n        print(df_control.columns)\n        fit = umap.UMAP(n_neighbors=100, min_dist=0.05, metric='euclidean', random_state=42)\n        embedding = fit.fit_transform(df_select_lasso)\n        # print(embedding[:1])\n        ret = pd.DataFrame(data=embedding, columns=['x', 'y'])\n        ret['color']=color\n        # print(ret[ret['color']=='red']['x'])\n        returns = {\n            'basic_umap': {\n                'black':{\n                    'x': ret[ret['color']=='black']['x'],\n                    'y': ret[ret['color']=='black']['y'],\n                }, \n                'red':{\n                    'x': ret[ret['color']=='red']['x'],\n                    'y': ret[ret['color']=='red']['y'],\n                }\n            }\n        }\n\n        # df_stippe = df_quality[stp_str]\n        # df_control = df_control.fillna(0)\n        # df_stippe = df_stippe.fillna(0)\n        # lasso = linear_model.Lasso(alpha=10.0)\n        # lasso.fit(df_control, df_stippe)\n        # print(lasso.coef_)\n        # print(df_stippe)\n\n\n        returns['top_parametr_lasso'] = df_select_lasso.columns\n        # df_stippe = pd.DataFrame(df_stippe)\n\n        # filename = 'this_afterLasso_umap.sav'\n        # embedding_lasso = joblib.load(filename)\n        # embedding_lasso = fit.transform(df_select_lasso)\n        # print(df_select_lasso.head())\n\n        # fit = umap.UMAP(n_neighbors=50, min_dist = 0.99, random_state=42)\n        # embedding_lasso = fit.fit_transform(df_select_lasso)\n        # ret = pd.DataFrame(data=embedding_lasso, columns=['x', 'y'])\n        # ret['color']=color\n        # returns['lasso_umap']['black']['x'] = ret[ret['color']=='black']['x']\n        # returns['lasso_umap']['black']['y'] = ret[ret['color']=='black']['y']\n        # returns['lasso_umap']['red']['x'] = ret[ret['color']=='red']['x']\n        # returns['lasso_umap']['red']['y'] = ret[ret['color']=='red']['y']\n\n        \n        return returns\n\n\n\n    def Hist(params):\n\n        machine = Machine.objects.get(pk=params)\n        objects = KPI.objects.all().filter(werk=machine).filter(yields__lte=100)\n        table = {'rezeptur': [], 'yield': [], 'material': [], 'kd_name': []}\n        for object in objects:\n            table['rezeptur'].append(object.rezeptur)\n            table['yield'].append(object.yields)\n            table['material'].append(object.material)\n            table['kd_name'].append(object.kd_name)\n\n        table = pd.DataFrame.from_dict(table)\n        mean_rezeptur = table.groupby(['rezeptur']).mean().sort_values(by='yield', ascending=False)\n        mean_material = table.groupby(['material']).mean().sort_values(by='yield', ascending=False)\n        kd_name_size = table.groupby(['kd_name']).size().sort_values(ascending=False)\n        yields_rez = []\n        rezeptur = []\n        yields_mat = []\n        material = []\n        number_orders = []\n        kd_name = []\n        for i,(index,row) in enumerate(mean_rezeptur.iterrows()):\n            if i == 10:\n                break\n            yields_rez.append(int(row['yield']))\n            rezeptur.append(index)\n\n        for i,(index,row) in enumerate(mean_material.iterrows()):\n            if i == 10:\n                break\n            yields_mat.append(int(row['yield']))\n            material.append(index)\n\n        for i, (index, row) in enumerate(kd_name_size.iteritems()):\n            if i == 10:\n                break\n            kd_name.append(index)\n            number_orders.append(row)\n        ret = {\n            \"x_rez\": yields_rez,\n            \"y_rez\": rezeptur,\n            \"x_mat\": yields_mat,\n            \"y_mat\": material,\n            'x_kd_name': number_orders,\n            'y_kd': kd_name\n        }\n        return ret\n\n    def LinearReg(params):\n        #done this way because there was no time to dig into how the Django models work\n        conn = sqlite3.connect('db.sqlite3')\n        query = \"SELECT * FROM dataanalysis_measurements where parameter_id=%s or parameter_id =%s\"%(params['Control'][0], 
params['Quality'][0])\n        \n        df = pd.read_sql_query(query,conn)\n        \n        date_X = df[df['parameter_id'] == params['Control'][0]]['date']\n        date_Y = df[df['parameter_id'] == params['Quality'][0]]['date']\n\n\n        X = df[df['parameter_id'] == params['Control'][0]]\n        X = X['value']\n        y = df[df['parameter_id'] == params['Quality'][0]]\n        y = y['value']\n\n\n        X_train = X[:1000].values.reshape(-1,1)\n        X_test = X[-1000:].values.reshape(-1,1)\n        \n        y_train = y[:1000].values.reshape(-1,1)\n        y_test = y[-1000:].values.reshape(-1,1)\n\n\n        #didn't bother loading an already trained model; for the test we just train on the fly\n        regr = linear_model.LinearRegression()\n        regr.fit(X_train, y_train)\n        y_pred = regr.predict(X_test)\n\n        mse = mean_squared_error(y_test, y_pred)\n        print('Coefficients: \\n', regr.coef_)\n        print(\"Mean squared error: %.2f\" % mean_squared_error(y_test, y_pred))\n        \n        print('Variance score: %.2f' % r2_score(y_test, y_pred))\n\n        fpr, tpr, thresholds = roc_curve(y_test, y_pred, pos_label=2)\n        roc_auc = auc(fpr, tpr)\n        ret = {\n            \"coefc\":regr.coef_.tolist(),\n            \"mean_squared_error\": mse,\n            \"variance_score\":r2_score(y_test, y_pred),\n            \"X_test\":X_test.tolist(),\n            \"y_test\":y_test.tolist(),\n            \"y_pred\":y_pred.tolist(),\n            \"false_positive_rate\":fpr.tolist(),\n            \"true_postitve_rate\":tpr.tolist(),\n            \"roc_auc\":roc_auc.tolist(),\n            \"date\": date_X.tolist()\n        }\n        return ret\n","sub_path":"back/dataanalysis/Calculations.py","file_name":"Calculations.py","file_ext":"py","file_size_in_byte":8049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"344927651","text":"import unittest\n\nfrom model.Assignment import Assignment\nfrom integration.AssignmentDao import AssignmentDao\nfrom integration.UserDao import UserDao\nfrom integration.WishDao import WishDao\nfrom test.integration.helpers.DatabaseHelper import reset_database\nfrom test.integration.helpers.WishHelper import WishHelper\nfrom test.integration.helpers.UserHelper import UserHelper\n\n\nclass TestAssignmentDao(unittest.TestCase):\n\n    def __init__(self, *args, **kwargs):\n        super(TestAssignmentDao, self).__init__(*args, **kwargs)\n        self.assignmentDao = AssignmentDao()\n        self.userDao = UserDao()\n        self.wishDao = WishDao()\n        self.wishHelper = WishHelper()\n        self.userHelper = UserHelper()\n\n    def setUp(self):\n        reset_database()\n\n    def test_get_assignment_by_wish_id(self):\n        # add dummy wish\n        wish = 
self.wishHelper.add_dummy_wish()\n\n # add assignee\n assignee = self.userHelper.add_dummy_user()\n\n # add assignment for wish\n assignment = Assignment()\n assignment.user = assignee\n assignment.wish = wish\n persistent_assignment1 = self.assignmentDao.add(assignment)\n\n found_assignments = self.assignmentDao.get_assignments_by_user_id(assignee.id)\n\n assert len(found_assignments) == 1\n assert found_assignments[0] == persistent_assignment1\n\n def test_get_assignments_by_user_id_many_assignments(self):\n # add dummy wishes\n wish1 = self.wishHelper.add_dummy_wish()\n wish2 = self.wishHelper.add_dummy_wish()\n\n # add assignee\n assignee = self.userHelper.add_dummy_user()\n\n # add assignment for wish1\n assignment1 = Assignment()\n assignment1.user = assignee\n assignment1.wish = wish1\n persistent_assignment1 = self.assignmentDao.add(assignment1)\n\n # add assignment for wish2\n assignment2 = Assignment()\n assignment2.user = assignee\n assignment2.wish = wish2\n persistent_assignment2 = self.assignmentDao.add(assignment2)\n\n found_assignments = self.assignmentDao.get_assignments_by_user_id(assignee.id)\n\n assert len(found_assignments) == 2\n assert (found_assignments[0] == persistent_assignment1 and found_assignments[1] == persistent_assignment2) or \\\n (found_assignments[0] == persistent_assignment2 and found_assignments[1] == persistent_assignment1)\n\n","sub_path":"app/test/integration/integration/test_assignmentDao.py","file_name":"test_assignmentDao.py","file_ext":"py","file_size_in_byte":3610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"622535314","text":"import tkinter as tk\nfrom Eye_Detector import GazeDetector\nimport cv2\nimport os\n\nHEIGHT = 100\nWIDTH = 300\n\nclass GUI():\n \n def __init__(self, root):\n\n self.gazeDetector = GazeDetector()\n \n self.cap=cv2.VideoCapture(0) #Begin webcam capture\n \n self.root = root\n self.root.configure(background='DeepSkyBlue3')\n \n self.canvas = tk.Canvas(root, height=HEIGHT, width=WIDTH)\n\n\n #Left Frame\n self.leftFrame = tk.Frame(self.root) #, fg=\"white\"\n self.leftFrame.place(relwidth=0.33, relheight=1, relx=0, rely=0)\n self.leftFrame.configure(background='DeepSkyBlue3')\n\n rocketPath = os.path.join(\"rocket.png\")\n rocket = tk.PhotoImage(file=rocketPath)\n\n #Left Image\n self.leftImg = tk.Label(self.leftFrame, image=rocket)\n self.leftImg.image = rocket\n self.leftImg.place(relwidth=0.9, relheight=0.5, relx=0.05, rely=0.05)\n self.leftImg.configure(image=rocket)\n \n #Right Emotion Label\n self.leftLabel = tk.Label(self.leftFrame, text=\"\", font=(\"Helvetica\", 20))\n self.leftLabel.place(relwidth=0.9, relheight=0.25, relx=0.05, rely=0.65)\n self.leftLabel.configure(background='DeepSkyBlue3')\n\n\n\n #Middle Frame\n self.middleFrame = tk.Frame(self.root)\n self.middleFrame.place(relwidth=0.34, relheight=1, relx=0.34, rely=0)\n self.middleFrame.configure(background='DeepSkyBlue3')\n\n burgerPath = os.path.join(\"burger.png\")\n burger = tk.PhotoImage(file=burgerPath)\n \n #Middle Image\n self.middleImg = tk.Label(self.middleFrame)\n self.middleImg.image = burger\n self.middleImg.place(relwidth=0.9, relheight=0.5, relx=0.05, rely=0.05)\n self.middleImg.configure(image=burger)\n\n #Middle Emotion Label\n self.middleLabel = tk.Label(self.middleFrame, text=\"\", font=(\"Helvetica\", 20))\n self.middleLabel.place(relwidth=0.9, relheight=0.25, relx=0.05, rely=0.65)\n self.middleLabel.configure(background='DeepSkyBlue3')\n\n\n \n #Right Frame\n self.rightFrame = 
tk.Frame(self.root)\n self.rightFrame.place(relwidth=0.33, relheight=1, relx=0.68, rely=0)\n self.rightFrame.configure(background='DeepSkyBlue3')\n\n\n puppyPath = os.path.join(\"puppy.png\")\n puppy = tk.PhotoImage(file=puppyPath)\n \n #Right Image\n self.rightImg = tk.Label(self.rightFrame)\n self.rightImg.image = puppy\n self.rightImg.place(relwidth=0.9, relheight=0.5, relx=0.05, rely=0.05)\n self.rightImg.configure(image=puppy)\n \n #Right Emotion Label\n self.rightLabel = tk.Label(self.rightFrame, text=\"\", font=(\"Helvetica\", 20))\n self.rightLabel.place(relwidth=0.9, relheight=0.25, relx=0.05, rely=0.65)\n self.rightLabel.configure(background='DeepSkyBlue3')\n \n\n\n self.frameArray = [self.leftFrame, self.middleFrame, self.rightFrame]\n self.emotionArray = [self.leftLabel, self.middleLabel, self.rightLabel]\n\n\n \n self.mainLoop()\n \n #Colours section of screen you are looking at\n def colourImage(self, hor, emotion):\n for h in range(3):\n if h == hor: \n self.frameArray[h].configure(background='Red')\n self.emotionArray[h].configure(background='Red')\n self.emotionArray[h]['text'] = \"Your emotional response to this image is: \" + emotion\n else:\n self.frameArray[h].configure(background='DeepSkyBlue3') \n self.emotionArray[h].configure(background='DeepSkyBlue3') \n self.emotionArray[h]['text'] = \"\"\n\n def mainLoop(self):\n\n ret,frame=self.cap.read()\n\n hor = self.gazeDetector.getEyeRegion(frame)\n emotion = self.gazeDetector.emotionClassification(frame)\n\n self.colourImage(hor[0], emotion)\n\n self.root.after(300, self.mainLoop) #Don't use () after function name!!\n\nroot = tk.Tk()\n\nGUI = GUI(root)\n\nroot.mainloop()\n\nGUI.cap.release()\ncv2.destroyAllWindows()\n","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"53947506","text":"import typing\nfrom typing import List, Mapping, Union, Optional\nimport itertools\nimport math\nimport collections\nimport copy\nimport operator\nimport warnings\n\nimport attr\nfrom attr.validators import optional\n\nfrom .script import dumps, loads\n\nNumber = Union[int, float]\nID = str\nTime = Number\nSize = Number\nRate = float\nProportion = float\n\n_ISCLOSE_REL_TOL = 1e-9\n_ISCLOSE_ABS_TOL = 1e-12\n\n# Validator functions.\n\n\ndef positive(self, attribute, value):\n if value <= 0:\n raise ValueError(f\"{attribute.name} must be greater than zero\")\n\n\ndef non_negative(self, attribute, value):\n if value < 0:\n raise ValueError(f\"{attribute.name} must be non-negative\")\n\n\ndef finite(self, attribute, value):\n if math.isinf(value):\n raise ValueError(f\"{attribute.name} must be finite\")\n\n\ndef unit_interval(self, attribute, value):\n if not (0 <= value <= 1):\n raise ValueError(f\"must have 0 <= {attribute.name} <= 1\")\n\n\ndef isclose(\n a: Optional[Number],\n b: Optional[Number],\n *,\n rel_tol=_ISCLOSE_REL_TOL,\n abs_tol=_ISCLOSE_ABS_TOL,\n) -> bool:\n \"\"\"\n Wrapper around math.isclose() that handles None.\n \"\"\"\n if None in (a, b):\n return (a,) == (b,)\n else:\n return math.isclose(\n typing.cast(float, a),\n typing.cast(float, b),\n rel_tol=rel_tol,\n abs_tol=abs_tol,\n )\n\n\ndef isclose_deme_proportions(\n a_ids: Optional[List[ID]],\n a_proportions: Optional[List[Proportion]],\n b_ids: Optional[List[ID]],\n b_proportions: Optional[List[Proportion]],\n *,\n rel_tol=_ISCLOSE_REL_TOL,\n abs_tol=_ISCLOSE_ABS_TOL,\n) -> bool:\n \"\"\"\n Returns true if (a_ids, a_proportions) and 
(b_ids, b_proportions)\n are semantically equivalent. The order of ids is ignored, and proportions\n are checked for numerical closeness.\n \"\"\"\n if None in (a_ids, b_ids):\n return (a_ids, a_proportions) == (b_ids, b_proportions)\n\n a_ids = typing.cast(List[ID], a_ids)\n a_proportions = typing.cast(List[Proportion], a_proportions)\n b_ids = typing.cast(List[ID], b_ids)\n b_proportions = typing.cast(List[Number], b_proportions)\n\n if len(a_ids) != len(b_ids) or len(a_proportions) != len(b_proportions):\n return False\n a = sorted(zip(a_ids, a_proportions), key=operator.itemgetter(0))\n b = sorted(zip(b_ids, b_proportions), key=operator.itemgetter(0))\n for (a_id, a_proportion), (b_id, b_proportion) in zip(a, b):\n if a_id != b_id or not isclose(\n a_proportion, b_proportion, rel_tol=rel_tol, abs_tol=abs_tol\n ):\n return False\n return True\n\n\n@attr.s(auto_attribs=True, kw_only=True)\nclass Epoch:\n \"\"\"\n Population size parameters for a deme in a specified time period.\n Times follow the forwards-in-time convention (time values increase\n from the present towards the past). The start time of the epoch is\n the more ancient time, and the end time is more recent, so that the\n start time must be greater than the end time\n\n :ivar start_time: The start time of the epoch.\n :ivar end_time: The end time of the epoch (must be specified).\n :ivar initial_size: Population size at ``start_time``.\n :ivar final_size: Population size at ``end_time``.\n If ``initial_size != final_size``, the population size changes\n monotonically between the start and end times.\n :ivar size_function: The size change function. Common options are constant,\n exponential, or linear, though any string is valid. Warning: downstream\n simulators might not understand the size_function provided.\n :ivar selfing_rate: An optional selfing rate for this epoch.\n :ivar cloning_rate: An optional cloning rate for this epoch.\n \"\"\"\n\n start_time: Optional[Time] = attr.ib(default=None, validator=optional(non_negative))\n end_time: Time = attr.ib(default=None, validator=[non_negative, finite])\n initial_size: Optional[Size] = attr.ib(\n default=None, validator=optional([positive, finite])\n )\n final_size: Optional[Size] = attr.ib(\n default=None, validator=optional([positive, finite])\n )\n size_function: Optional[str] = attr.ib(default=None)\n selfing_rate: Optional[Proportion] = attr.ib(\n default=None, validator=optional(unit_interval)\n )\n cloning_rate: Optional[Proportion] = attr.ib(\n default=None, validator=optional(unit_interval)\n )\n\n def __attrs_post_init__(self):\n if self.initial_size is None and self.final_size is None:\n raise ValueError(\"must set either initial_size or final_size\")\n if (\n self.start_time is not None\n and self.end_time is not None\n and self.start_time <= self.end_time\n ):\n raise ValueError(\"must have start_time > end_time\")\n if (\n self.start_time is not None\n and self.initial_size is not None\n and self.final_size is not None\n ):\n if math.isinf(self.start_time) and self.initial_size != self.final_size:\n raise ValueError(\"if start time is inf, must be a constant size epoch\")\n\n @property\n def time_span(self):\n \"\"\"\n The time span of the epoch.\n \"\"\"\n return self.start_time - self.end_time\n\n def isclose(\n self,\n other,\n *,\n rel_tol=_ISCLOSE_REL_TOL,\n abs_tol=_ISCLOSE_ABS_TOL,\n ) -> bool:\n return (\n self.__class__ is other.__class__\n and isclose(\n self.start_time, other.start_time, rel_tol=rel_tol, abs_tol=abs_tol\n )\n and isclose(self.end_time, 
other.end_time, rel_tol=rel_tol, abs_tol=abs_tol)\n            and isclose(\n                self.initial_size, other.initial_size, rel_tol=rel_tol, abs_tol=abs_tol\n            )\n            and isclose(\n                self.final_size, other.final_size, rel_tol=rel_tol, abs_tol=abs_tol\n            )\n            and self.size_function == other.size_function\n            and isclose(\n                self.selfing_rate, other.selfing_rate, rel_tol=rel_tol, abs_tol=abs_tol\n            )\n            and isclose(\n                self.cloning_rate, other.cloning_rate, rel_tol=rel_tol, abs_tol=abs_tol\n            )\n        )\n\n\n@attr.s(auto_attribs=True, kw_only=True)\nclass Migration:\n    \"\"\"\n    Parameters for continuous migration from one deme to another.\n    Source and destination demes follow the forwards-in-time convention,\n    so that migrants born in the source deme have children in the dest\n    deme.\n\n    :ivar source: The source deme.\n    :ivar dest: The destination deme.\n    :ivar start_time: The time at which the migration rate becomes active.\n    :ivar end_time: The time at which the migration rate is deactivated.\n    :ivar rate: The rate of migration. Set to zero to disable migrations after\n        the given time.\n    \"\"\"\n\n    source: ID = attr.ib()\n    dest: ID = attr.ib()\n    start_time: Time = attr.ib(validator=non_negative)\n    end_time: Time = attr.ib(validator=[non_negative, finite])\n    rate: Rate = attr.ib(validator=[non_negative, finite])\n\n    def __attrs_post_init__(self):\n        if self.source == self.dest:\n            raise ValueError(\"source and dest cannot be the same deme\")\n\n    def isclose(\n        self,\n        other,\n        *,\n        rel_tol=_ISCLOSE_REL_TOL,\n        abs_tol=_ISCLOSE_ABS_TOL,\n    ) -> bool:\n        return (\n            self.__class__ is other.__class__\n            and self.source == other.source\n            and self.dest == other.dest\n            and isclose(\n                self.start_time, other.start_time, rel_tol=rel_tol, abs_tol=abs_tol\n            )\n            and isclose(self.end_time, other.end_time, rel_tol=rel_tol, abs_tol=abs_tol)\n            and isclose(self.rate, other.rate, rel_tol=rel_tol, abs_tol=abs_tol)\n        )\n\n\n@attr.s(auto_attribs=True, kw_only=True)\nclass Pulse:\n    \"\"\"\n    Parameters for a pulse of migration from one deme to another.\n    Source and destination demes follow the forwards-in-time convention,\n    so that migrants born in the source deme have children in the dest\n    deme.\n\n    :ivar source: The source deme.\n    :ivar dest: The destination deme.\n    :ivar time: The time of migration.\n    :ivar proportion: At the instant after migration, this is the proportion\n        of individuals in the destination deme made up of individuals from\n        the source deme.\n    \"\"\"\n\n    source: ID = attr.ib()\n    dest: ID = attr.ib()\n    time: Time = attr.ib(validator=[non_negative, finite])\n    proportion: Proportion = attr.ib(validator=unit_interval)\n\n    def __attrs_post_init__(self):\n        if self.source == self.dest:\n            raise ValueError(\"source and dest cannot be the same deme\")\n\n    def isclose(\n        self,\n        other,\n        *,\n        rel_tol=_ISCLOSE_REL_TOL,\n        abs_tol=_ISCLOSE_ABS_TOL,\n    ) -> bool:\n        return (\n            self.__class__ is other.__class__\n            and self.source == other.source\n            and self.dest == other.dest\n            and isclose(self.time, other.time, rel_tol=rel_tol, abs_tol=abs_tol)\n            and isclose(\n                self.proportion, other.proportion, rel_tol=rel_tol, abs_tol=abs_tol\n            )\n        )\n\n\n@attr.s(auto_attribs=True, kw_only=True)\nclass Split:\n    \"\"\"\n    Parameters for a split event, in which a deme ends at a given time and\n    contributes ancestry to an arbitrary number of descendant demes. 
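For illustration\n    with hypothetical deme IDs, ``Split(parent=\"anc\", children=[\"A\", \"B\"],\n    time=100)`` says that deme ``anc`` ends at time 100 and that ``A`` and ``B``\n    inherit its ancestry. 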
Note\n that there could be just a single descendant deme, in which case ``split``\n is a bit of a misnomer...\n\n :ivar parent: The parental deme.\n :ivar children: A list of descendant demes.\n :ivar time: The split time.\n \"\"\"\n\n parent: ID = attr.ib()\n children: List[ID] = attr.ib()\n time: Time = attr.ib(validator=[non_negative, finite])\n\n def __attrs_post_init__(self):\n if not isinstance(self.children, list):\n raise ValueError(\"children of split must be passed as a list\")\n for child in self.children:\n if child == self.parent:\n raise ValueError(\"child and parent cannot be the same deme\")\n\n def isclose(\n self,\n other,\n *,\n rel_tol=_ISCLOSE_REL_TOL,\n abs_tol=_ISCLOSE_ABS_TOL,\n ) -> bool:\n return (\n self.__class__ is other.__class__\n and self.parent == other.parent\n and sorted(self.children) == sorted(other.children)\n and isclose(self.time, other.time, rel_tol=rel_tol, abs_tol=abs_tol)\n )\n\n\n@attr.s(auto_attribs=True, kw_only=True)\nclass Branch:\n \"\"\"\n Parameters for a branch event, where a new deme branches off from a parental\n deme. The parental deme need not end at that time.\n\n :ivar parent: The parental deme.\n :ivar child: The descendant deme.\n :ivar time: The branch time.\n \"\"\"\n\n parent: ID = attr.ib()\n child: ID = attr.ib()\n time: Time = attr.ib(validator=[non_negative, finite])\n\n def __attrs_post_init__(self):\n if self.child == self.parent:\n raise ValueError(\"child and parent cannot be the same deme\")\n\n def isclose(\n self,\n other,\n *,\n rel_tol=_ISCLOSE_REL_TOL,\n abs_tol=_ISCLOSE_ABS_TOL,\n ) -> bool:\n return (\n self.__class__ is other.__class__\n and self.parent == other.parent\n and self.child == other.child\n and isclose(self.time, other.time, rel_tol=rel_tol, abs_tol=abs_tol)\n )\n\n\n@attr.s(auto_attribs=True, kw_only=True)\nclass Merge:\n \"\"\"\n Parameters for a merge event, in which two or more demes end at some time and\n contribute to a descendant deme.\n\n :ivar parents: A list of parental demes.\n :ivar proportions: A list of ancestry proportions, in order of `parents`.\n :ivar child: The descendant deme.\n :ivar time: The merge time.\n \"\"\"\n\n parents: List[ID] = attr.ib()\n proportions: List[Proportion] = attr.ib()\n child: ID = attr.ib()\n time: Time = attr.ib(validator=[non_negative, finite])\n\n def __attrs_post_init__(self):\n if not isinstance(self.parents, list):\n raise ValueError(\"parents must be passed as a list\")\n if not isinstance(self.proportions, list):\n raise ValueError(\"proportions must be passed as a list\")\n if len(self.parents) < 2:\n raise ValueError(\"merge must involve at least two ancestors\")\n if math.isclose(sum(self.proportions), 1) is False:\n raise ValueError(\"proportions must sum to 1\")\n if len(self.parents) != len(self.proportions):\n raise ValueError(\"parents and proportions must have same length\")\n if self.child in self.parents:\n raise ValueError(\"merged deme cannot be its own ancestor\")\n if len(set(self.parents)) != len(self.parents):\n raise ValueError(\"cannot repeat parents in merge\")\n\n def isclose(\n self,\n other,\n *,\n rel_tol=_ISCLOSE_REL_TOL,\n abs_tol=_ISCLOSE_ABS_TOL,\n ) -> bool:\n return (\n self.__class__ is other.__class__\n and isclose_deme_proportions(\n self.parents,\n self.proportions,\n other.parents,\n other.proportions,\n rel_tol=rel_tol,\n abs_tol=abs_tol,\n )\n and self.child == other.child\n and isclose(self.time, other.time, rel_tol=rel_tol, abs_tol=abs_tol)\n )\n\n\n@attr.s(auto_attribs=True, kw_only=True)\nclass Admix:\n 
\"\"\"\n Parameters for an admixture event, where two or more demes contribute ancestry\n to a new deme.\n\n :ivar parents: A list of source demes.\n :ivar proportions: A list of ancestry proportions, in order of `parents`.\n :ivar child: The admixed deme.\n :ivar time: The admixture time.\n \"\"\"\n\n parents: List[ID] = attr.ib()\n proportions: List[Proportion] = attr.ib()\n child: ID = attr.ib()\n time: Time = attr.ib(validator=[non_negative, finite])\n\n def __attrs_post_init__(self):\n if not isinstance(self.parents, list):\n raise ValueError(\"parents must be passed as a list\")\n if not isinstance(self.proportions, list):\n raise ValueError(\"proportions must be passed as a list\")\n if len(self.parents) < 2:\n raise ValueError(\"admixture must involve at least two ancestors\")\n if math.isclose(sum(self.proportions), 1) is False:\n raise ValueError(\"Proportions must sum to 1\")\n if len(self.parents) != len(self.proportions):\n raise ValueError(\"parents and proportions must have same length\")\n if self.child in self.parents:\n raise ValueError(\"admixed deme cannot be its own ancestor\")\n if len(set(self.parents)) != len(self.parents):\n raise ValueError(\"cannot repeat parents in admixure\")\n\n def isclose(\n self,\n other,\n *,\n rel_tol=_ISCLOSE_REL_TOL,\n abs_tol=_ISCLOSE_ABS_TOL,\n ) -> bool:\n return (\n self.__class__ is other.__class__\n and isclose_deme_proportions(\n self.parents,\n self.proportions,\n other.parents,\n other.proportions,\n rel_tol=rel_tol,\n abs_tol=abs_tol,\n )\n and self.child == other.child\n and isclose(self.time, other.time, rel_tol=rel_tol, abs_tol=abs_tol)\n )\n\n\n@attr.s(auto_attribs=True, kw_only=True)\nclass Deme:\n \"\"\"\n A collection of individuals that are exchangeable at any fixed time.\n This class is not intended to be instantiated directly. It is instead\n recommended to add demes to a :class:`.DemeGraph` object using the\n :meth:`DemeGraph.deme` method.\n\n :ivar str id: A string identifier for the deme.\n :ivar str description: A description of the deme. May be ``None``.\n :ivar ancestors: List of string identifiers for the deme's ancestors.\n This may be ``None``, indicating the deme has no ancestors.\n :vartype ancestors: list of str\n :ivar proportions: If ``ancestors`` is not ``None``, this indicates the\n proportions of ancestry from each ancestor. This list has the same\n length as ``ancestors``, and must sum to 1.\n :vartype proportions: list of float\n :ivar epochs: A list of epochs, which define the population size(s) of\n the deme. 
The deme must be initially created with exactly one epoch.\n Additional epochs may be added with :meth:`.add_epoch`\n :vartype epochs: list of :class:`.Epoch`\n :ivar float selfing_rate: An optional selfing rate for this deme.\n :ivar float cloning_rate: An optional cloning rate for this deme.\n \"\"\"\n\n id: ID = attr.ib()\n description: str = attr.ib()\n ancestors: List[ID] = attr.ib()\n proportions: List[Proportion] = attr.ib()\n epochs: List[Epoch] = attr.ib()\n selfing_rate: Optional[Proportion] = attr.ib(\n default=None, validator=optional([unit_interval])\n )\n cloning_rate: Optional[Proportion] = attr.ib(\n default=None, validator=optional([unit_interval])\n )\n\n @epochs.validator\n def _check_epochs(self, attribute, value):\n if len(self.epochs) != 1:\n raise ValueError(\n \"Deme must be created with exactly one epoch.\"\n \"Use add_epoch() to supply additional epochs.\"\n )\n\n def __attrs_post_init__(self):\n if self.ancestors is not None:\n if not isinstance(self.ancestors, (list, tuple)):\n raise TypeError(\"ancestors must be a list of deme IDs\")\n if len(set(self.ancestors)) != len(self.ancestors):\n raise ValueError(f\"duplicate ancestors in {self.ancestors}\")\n if len(self.ancestors) > 1 and self.proportions is None:\n raise ValueError(\"proportions must be set if more than one ancestor\")\n if len(self.ancestors) != len(self.proportions):\n raise ValueError(\"ancestors and proportions must have same length\")\n if self.id in self.ancestors:\n raise ValueError(f\"{self.id} cannot be its own ancestor\")\n # if selfing or cloning rates are not given, set them to deme's default rate\n epoch = self.epochs[0]\n if epoch.selfing_rate is None:\n epoch.selfing_rate = self.selfing_rate\n if epoch.cloning_rate is None:\n epoch.cloning_rate = self.cloning_rate\n\n def isclose(\n self,\n other,\n *,\n rel_tol=_ISCLOSE_REL_TOL,\n abs_tol=_ISCLOSE_ABS_TOL,\n ) -> bool:\n return (\n self.__class__ is other.__class__\n and self.id == other.id\n and isclose_deme_proportions(\n self.ancestors,\n self.proportions,\n other.ancestors,\n other.proportions,\n rel_tol=rel_tol,\n abs_tol=abs_tol,\n )\n and all(\n e1.isclose(e2, rel_tol=rel_tol, abs_tol=abs_tol)\n for e1, e2 in zip(self.epochs, other.epochs)\n )\n )\n\n def add_epoch(self, epoch: Epoch):\n \"\"\"\n Add an epoch to the deme's epoch list.\n Epochs must be non overlapping and added in time-decreasing order, i.e.\n starting with the most ancient epoch and adding epochs sequentially toward\n the present.\n\n :param epoch: The epoch to add.\n :type epoch: :class:`.Epoch`\n \"\"\"\n assert len(self.epochs) > 0\n # if the epoch start time is not given, it equals the previous epoch's end time\n prev_epoch = self.epochs[-1]\n if epoch.start_time is None:\n epoch.start_time = prev_epoch.end_time\n elif epoch.start_time > prev_epoch.end_time:\n raise ValueError(\n \"epochs must be non overlapping and added in time-decreasing order\"\n )\n if prev_epoch.end_time != epoch.start_time:\n raise ValueError(\"cannot have gap between consecutive epochs\")\n if epoch.time_span <= 0:\n raise ValueError(\"epoch must exist for some positive time\")\n # implicitly set the initial and final sizes, if not given\n if epoch.initial_size is None:\n epoch.initial_size = prev_epoch.final_size\n if epoch.final_size is None:\n epoch.final_size = epoch.initial_size\n # check or assign the size function over this epoch\n if epoch.size_function is None:\n if epoch.initial_size == epoch.final_size:\n epoch.size_function = \"constant\"\n else:\n epoch.size_function = 
\"exponential\"\n else:\n # check if constant function is correct\n if (\n epoch.size_function == \"constant\"\n and epoch.initial_size != epoch.final_size\n ):\n raise ValueError(\n \"epoch size function is constant but initial and \"\n \"final sizes are not equal\"\n )\n # if selfing or cloning rates are not given, set them to deme's default rate\n if epoch.selfing_rate is None:\n epoch.selfing_rate = self.selfing_rate\n if epoch.cloning_rate is None:\n epoch.cloning_rate = self.cloning_rate\n self.epochs.append(epoch)\n\n @property\n def start_time(self):\n \"\"\"\n The start time of the deme's existence.\n \"\"\"\n return self.epochs[0].start_time\n\n @property\n def end_time(self):\n \"\"\"\n The end time of the deme's existence.\n \"\"\"\n return self.epochs[-1].end_time\n\n @property\n def time_span(self):\n \"\"\"\n The time span over which the deme exists.\n \"\"\"\n return self.start_time - self.end_time\n\n\n@attr.s(auto_attribs=True, kw_only=True)\nclass DemeGraph:\n \"\"\"\n The DemeGraph class provides a high-level API for constructing a demographic\n model. The methods on this class ensure validity of a model at all stages\n of construction. They also allow omission of detail, when there is a single\n unambiguous interpretation (or a very sensible default). The semantics\n exactly match those for loading the ``yaml`` file, as the :func:`.load`\n function uses this API internally.\n\n :ivar str description: A human readable description of the demography.\n :ivar str time_units: The units of time used for the demography. This is\n commonly ``years`` or ``generations``, but can be any string.\n This field is intended to be useful for documenting a demography,\n but the actual value provided here should not be relied upon.\n :ivar float generation_time: The generation time of demes, in units given\n by the ``time_units`` parameter. Concretely, dividing all times\n by ``generation_time`` will convert the deme graph to have time\n units in generations. If ``generation_time`` is ``None``, the units\n are assumed to be in generations already.\n See also: :meth:`.in_generations`.\n :ivar str doi: If the deme graph describes a published demography, the DOI\n should be be given here. 
May be ``None``.\n    :ivar demes: A list of demes in the demography.\n        Not intended to be passed when the deme graph is instantiated.\n        Use :meth:`.deme` instead.\n    :vartype demes: list of :class:`.Deme`\n    :ivar migrations: A list of continuous migrations for the demography.\n        Not intended to be passed when the deme graph is instantiated.\n        Use :meth:`migration` or :meth:`symmetric_migration` instead.\n    :vartype migrations: list of :class:`.Migration`\n    :ivar pulses: A list of migration pulses for the demography.\n        Not intended to be passed when the deme graph is instantiated.\n        Use :meth:`pulse` instead.\n    :vartype pulses: list of :class:`.Pulse`\n    \"\"\"\n\n    description: str = attr.ib()\n    time_units: str = attr.ib()\n    generation_time: Optional[Time] = attr.ib(\n        default=None, validator=optional([positive, finite])\n    )\n    doi: Optional[str] = attr.ib(default=None)\n    demes: List[Deme] = attr.ib(factory=list)\n    migrations: List[Migration] = attr.ib(factory=list)\n    pulses: List[Pulse] = attr.ib(factory=list)\n    splits: List[Split] = attr.ib(factory=list)\n    branches: List[Branch] = attr.ib(factory=list)\n    mergers: List[Merge] = attr.ib(factory=list)\n    admixtures: List[Admix] = attr.ib(factory=list)\n    selfing_rate: Proportion = attr.ib(default=None)\n    cloning_rate: Proportion = attr.ib(default=None)\n\n    def __attrs_post_init__(self):\n        self._deme_map: Mapping[ID, Deme] = dict()\n\n    def __getitem__(self, deme_id):\n        \"\"\"\n        Return the :class:`.Deme` with the specified id.\n        \"\"\"\n        return self._deme_map[deme_id]\n\n    def __contains__(self, deme_id):\n        \"\"\"\n        Check if the deme graph contains a deme with the specified id.\n        \"\"\"\n        return deme_id in self._deme_map\n\n    def isclose(\n        self,\n        other,\n        *,\n        rel_tol=_ISCLOSE_REL_TOL,\n        abs_tol=_ISCLOSE_ABS_TOL,\n    ) -> bool:\n        \"\"\"\n        Returns true if the deme graph and ``other`` implement essentially\n        the same demographic model. Numerical values are compared using the\n        :func:`math.isclose` function, from which this method takes its name.\n        Furthermore, the following implementation details are ignored during\n        the comparison:\n\n        - The deme graph's ``description`` and ``doi`` attributes.\n        - The order in which ``migrations`` were specified.\n        - The order in which admixture ``pulses`` were specified.\n        - The order in which ``demes`` were specified.\n        - The order in which a deme's ``ancestors`` were specified.\n        - The ``selfing_rate`` and ``cloning_rate`` attributes of the deme\n          graph, or of the demes (if any). These attributes are considered\n          conveniences, and are propagated to the relevant demes'\n          epochs. The ``selfing_rate`` and ``cloning_rate`` attributes of\n          each epoch *are* evaluated for equality between the two models.\n\n        :param other: The deme graph to compare against.\n        :type other: :class:`.DemeGraph`\n        :param float rel_tol: The relative tolerance permitted for numerical\n            comparisons. See documentation for :func:`math.isclose`.\n        :param float abs_tol: The absolute tolerance permitted for numerical\n            comparisons. 
See documentation for :func:`math.isclose`.\n :return: True if the two graphs implement the same model, False otherwise.\n :rtype: bool\n \"\"\"\n\n def sorted_eq(aa, bb, *, rel_tol, abs_tol) -> bool:\n # Order-agnostic equality check.\n if len(aa) != len(bb):\n return False\n for (a, b) in zip(sorted(aa), sorted(bb)):\n if not a.isclose(b, rel_tol=rel_tol, abs_tol=abs_tol):\n return False\n return True\n\n return (\n self.__class__ is other.__class__\n and self.time_units == other.time_units\n and self.generation_time == other.generation_time\n and sorted_eq(self.demes, other.demes, rel_tol=rel_tol, abs_tol=abs_tol)\n and sorted_eq(\n self.migrations, other.migrations, rel_tol=rel_tol, abs_tol=abs_tol\n )\n and sorted_eq(self.pulses, other.pulses, rel_tol=rel_tol, abs_tol=abs_tol)\n )\n\n def deme(\n self,\n id,\n *,\n description=None,\n ancestors=None,\n proportions=None,\n start_time=None,\n end_time=None,\n initial_size=None,\n final_size=None,\n epochs=None,\n selfing_rate=None,\n cloning_rate=None,\n ):\n \"\"\"\n Add a deme to the graph, with lifetime ``(start_time, end_time]``.\n\n :param str id: A string identifier for the deme.\n :param ancestors: List of string identifiers for the deme's ancestors.\n This may be ``None``, indicating the deme has no ancestors.\n If the deme has multiple ancestors, the ``proportions`` parameter\n must also be provided.\n :type ancestors: list of str\n :param list proportions: A list of ancestry proportions for ``ancestors``.\n This list has the same length as ``ancestors``, and must sum to ``1.0``.\n May be omitted if the deme has only one, or zero, ancestors.\n :type proportions: list of float\n :param float start_time: The time at which this deme begins existing,\n in units of ``time_units`` before the present.\n\n - If the deme has zero ancestors, and ``start_time`` is not specified,\n the start time will be set to ``inf``.\n - If the deme has one ancestor, and ``start_time`` is not specified,\n the ``start_time`` will be set to the ancestor's ``end_time``.\n - If the deme has multiple ancestors, the ``start_time`` must be\n provided.\n\n :param float end_time: The time at which this deme stops existing,\n in units of ``time_units`` before the present.\n If not specified, defaults to ``0.0`` (the present).\n :param initial_size: The initial population size of the deme.\n This must be provided.\n :param final_size: The final population size of the deme. 
If ``None``,\n the deme has a constant ``initial_size`` population size.\n :param float selfing_rate: The default selfing rate for this deme.\n May be ``None``.\n :param float cloning_rate: The default cloning rate for this deme.\n May be ``None``.\n :param epochs: Epochs that define population sizes, selfing rates, and\n cloning rates, for the deme over various time periods.\n If not specified, a single epoch will be created for the deme that\n spans from ``start_time`` to ``end_time``, using the ``initial_size``,\n ``final_size``, ``selfing_rate`` and ``cloning_rate`` provided.\n \"\"\"\n if id in self:\n raise ValueError(f\"deme {id} already exists in this graph\")\n if initial_size is None and epochs is not None:\n initial_size = epochs[0].initial_size\n if initial_size is None:\n raise ValueError(f\"must set initial_size for {id}\")\n if selfing_rate is None:\n selfing_rate = self.selfing_rate\n if cloning_rate is None:\n cloning_rate = self.cloning_rate\n # set the start time to inf or to the ancestor's end time, if not given\n if ancestors is not None:\n if not isinstance(ancestors, (list, tuple)):\n raise TypeError(\"ancestors must be a list of deme IDs\")\n for ancestor in ancestors:\n if ancestor not in self:\n raise ValueError(f\"ancestor deme {ancestor} not in graph\")\n if start_time is not None:\n anc = self[ancestor]\n if not (anc.start_time >= start_time >= anc.end_time):\n raise ValueError(\n f\"start_time={start_time} is outside the interval \"\n f\"of existence for ancestor {ancestor} \"\n f\"({anc.start_time}, {anc.end_time})\"\n )\n if start_time is None:\n if len(ancestors) > 1:\n raise ValueError(\n \"with multiple ancestors, start_time must be specified\"\n )\n start_time = self[ancestors[0]].end_time\n if len(ancestors) == 1 and proportions is None:\n proportions = [1.0]\n else:\n if start_time is None:\n start_time = float(\"inf\")\n # build the deme, and then add epochs as necessary\n if epochs is None:\n # if epochs are not given, we assign a single epoch over that deme\n if final_size is None:\n final_size = initial_size\n if end_time is None:\n end_time = 0\n if initial_size == final_size:\n size_function = \"constant\"\n else:\n size_function = \"exponential\"\n epoch = Epoch(\n start_time=start_time,\n end_time=end_time,\n initial_size=initial_size,\n final_size=final_size,\n size_function=size_function,\n selfing_rate=selfing_rate,\n cloning_rate=cloning_rate,\n )\n deme = Deme(\n id=id,\n description=description,\n ancestors=ancestors,\n proportions=proportions,\n epochs=[epoch],\n selfing_rate=selfing_rate,\n cloning_rate=cloning_rate,\n )\n else:\n if end_time is None:\n end_time = epochs[-1].end_time\n if end_time != epochs[-1].end_time:\n raise ValueError(\"deme and final epoch end times do not align\")\n if epochs[0].selfing_rate is None:\n epochs[0].selfing_rate = selfing_rate\n if epochs[0].cloning_rate is None:\n epochs[0].cloning_rate = cloning_rate\n # deal with first epoch and deme start times\n if epochs[0].start_time is None:\n # first epoch starts at deme start time\n epochs[0].start_time = start_time\n elif epochs[0].start_time < start_time:\n # insert const size epoch to reach the start of first listed epoch\n epochs.insert(\n 0,\n Epoch(\n start_time=start_time,\n end_time=epochs[0].start_time,\n initial_size=initial_size,\n final_size=initial_size,\n size_function=\"constant\",\n selfing_rate=selfing_rate,\n cloning_rate=cloning_rate,\n ),\n )\n elif epochs[0].start_time > start_time:\n raise ValueError(\n \"first epoch start time must be 
less than or equal to \"\n \"deme start time\"\n )\n # set up sizes of first deme, since subsequent demes are built off of it\n if epochs[0].final_size is None:\n epochs[0].final_size = epochs[0].initial_size\n if epochs[0].size_function is None:\n if epochs[0].initial_size == epochs[0].final_size:\n epochs[0].size_function = \"constant\"\n else:\n epochs[0].size_function = \"exponential\"\n deme = Deme(\n id=id,\n description=description,\n ancestors=ancestors,\n proportions=proportions,\n epochs=[epochs[0]],\n selfing_rate=selfing_rate,\n cloning_rate=cloning_rate,\n )\n for epoch in epochs[1:]:\n deme.add_epoch(epoch)\n self._deme_map[deme.id] = deme\n self.demes.append(deme)\n\n def check_time_intersection(self, deme1, deme2, time):\n deme1 = self[deme1]\n deme2 = self[deme2]\n time_lo = max(deme1.end_time, deme2.end_time)\n time_hi = min(deme1.start_time, deme2.start_time)\n if time is not None:\n if not (time_lo <= time <= time_hi):\n raise ValueError(\n f\"{time} not in interval [{time_lo}, {time_hi}], \"\n f\"as defined by the time-intersection of {deme1.id} \"\n f\"(start_time={deme1.start_time}, end_time={deme1.end_time}) \"\n f\"and {deme2.id} (start_time={deme2.start_time}, \"\n f\"end_time={deme2.end_time}).\"\n )\n return time_lo, time_hi\n\n def symmetric_migration(self, *, demes=[], rate=0, start_time=None, end_time=None):\n \"\"\"\n Add continuous symmetric migrations between all pairs of demes in a list.\n\n :param demes: list of deme IDs. Migration is symmetric between all\n pairs of demes in this list.\n :param rate: The rate of migration per generation.\n :param start_time: The time at which the migration rate is enabled.\n :param end_time: The time at which the migration rate is disabled.\n \"\"\"\n if len(demes) < 2:\n raise ValueError(\"must specify two or more demes\")\n for source, dest in itertools.permutations(demes, 2):\n self.migration(\n source=source,\n dest=dest,\n rate=rate,\n start_time=start_time,\n end_time=end_time,\n )\n\n def migration(self, *, source, dest, rate=0, start_time=None, end_time=None):\n \"\"\"\n Add continuous migration from one deme to another.\n Source and destination demes follow the forwards-in-time convention,\n so that the migration rate refers to the movement of individuals from\n the ``source`` deme to the ``dest`` deme.\n\n :param source: The ID of the source deme.\n :param dest: The ID of the destination deme.\n :param rate: The rate of migration per generation.\n :param start_time: The time at which the migration rate is enabled.\n If ``None``, the start time is defined by the earliest time at\n which the demes coexist.\n :param end_time: The time at which the migration rate is disabled.\n If ``None``, the end time is defined by the latest time at which\n the demes coexist.\n \"\"\"\n for deme_id in (source, dest):\n if deme_id not in self:\n raise ValueError(f\"{deme_id} not in deme graph\")\n time_lo, time_hi = self.check_time_intersection(source, dest, start_time)\n if start_time is None:\n start_time = time_hi\n else:\n self.check_time_intersection(source, dest, start_time)\n if end_time is None:\n end_time = time_lo\n else:\n self.check_time_intersection(source, dest, end_time)\n self.migrations.append(\n Migration(\n source=source,\n dest=dest,\n start_time=start_time,\n end_time=end_time,\n rate=rate,\n )\n )\n\n def pulse(self, *, source, dest, proportion, time):\n \"\"\"\n Add a pulse of migration at a fixed time.\n Source and destination demes follow the forwards-in-time convention.\n\n :param source: The ID of the 
source deme.\n :param dest: The ID of the destination deme.\n :param proportion: At the instant after migration, this is the expected\n proportion of individuals in the destination deme made up of individuals\n from the source deme.\n :param time: The time at which migrations occur.\n \"\"\"\n for deme_id in (source, dest):\n if deme_id not in self:\n raise ValueError(f\"{deme_id} not in deme graph\")\n self.check_time_intersection(source, dest, time)\n\n # Check for models that have multiple pulses defined at the same time.\n # E.g. chains of pulses like: deme0 -> deme1; deme1 -> deme2,\n # where reversing the order of the pulse definitions changes the\n # interpretation of the model. Such models are valid, but the behaviour\n # may not be what the user expects.\n # See https://github.com/grahamgower/demes/issues/46\n sources = set()\n dests = set()\n for pulse in self.pulses:\n if pulse.time == time:\n sources.add(pulse.source)\n dests.add(pulse.dest)\n if source in dests or dest in (sources | dests):\n warnings.warn(\n \"Multiple pulses are defined for the same deme(s) at time \"\n f\"{time}. The ancestry proportions after this time will thus \"\n \"depend on the order in which the pulses have been specified. \"\n \"To avoid unexpected behaviour, the deme graph can instead \"\n \"be structured to introduce a new deme at this time with \"\n \"the desired ancestry proportions.\"\n )\n\n self.pulses.append(\n Pulse(source=source, dest=dest, time=time, proportion=proportion)\n )\n\n @property\n def successors(self):\n \"\"\"\n Lists of successors for all demes in the graph.\n \"\"\"\n # use collections.defaultdict(list)\n succ = {}\n for deme_info in self.demes:\n succ.setdefault(deme_info.id, [])\n if deme_info.ancestors is not None:\n for a in deme_info.ancestors:\n succ.setdefault(a, [])\n succ[a].append(deme_info.id)\n return succ\n\n @property\n def predecessors(self):\n \"\"\"\n Lists of predecessors (ancestors) for all demes in the graph.\n \"\"\"\n pred = {}\n for deme_info in self.demes:\n pred.setdefault(deme_info.id, [])\n if deme_info.ancestors is not None:\n for a in deme_info.ancestors:\n pred[deme_info.id].append(a)\n return pred\n\n def split(self, *, parent, children, time):\n \"\"\"\n Add split event at a given time. 
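As a sketch with\n        hypothetical deme IDs, ``dg.split(parent=\"anc\", children=[\"A\", \"B\"],\n        time=100)`` records that deme ``anc`` splits into ``A`` and ``B`` at time\n        100; all three demes must already exist in the graph ``dg`` with matching\n        end and start times. 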
Split events involve a parental deme\n whose end time equals the start time of all children demes.\n\n :param str parent: The ID of the parent deme.\n :param children: A list of IDs of the descendant demes.\n :type children: list of str\n :param float time: The time at which split occurs.\n \"\"\"\n for child in children:\n # check parent/children relationship and end/start times\n if child == parent:\n raise ValueError(\"cannot be ancestor of own deme\")\n if self[parent].end_time != self[child].start_time:\n raise ValueError(\n f\"{parent} and {child} must have matching end and start times\"\n )\n # the ancestor of each child population is set\n self[child].ancestors = [parent]\n self.splits.append(Split(parent=parent, children=children, time=time))\n\n def branch(self, *, parent, child, time):\n \"\"\"\n Add branch event at a given time.\n\n :param str parent: The ID of the parent deme.\n :param str child: The ID of the child deme.\n :param float time: The time at which branch event occurs.\n \"\"\"\n if (\n self[child].start_time < self[parent].end_time\n or self[child].start_time >= self[parent].start_time\n ):\n raise ValueError(\n f\"{child} start time must be within {parent} time interval\"\n )\n # set the ancestor of the child population\n self[child].ancestors = [parent]\n self.branches.append(Branch(parent=parent, child=child, time=time))\n\n def merge(self, *, parents, proportions, child, time):\n \"\"\"\n Add merger event at a given time, where multiple parents contribute to\n a descendant deme, and the parent demes cease to exist at that time.\n\n :param parents: The list of IDs for the ancestral demes.\n :type parents: list of str\n :param proportions: Proportions of ancestral demes contributing to descendant.\n :type proportions: list of float\n :param str child: The ID of the descendant deme.\n :param float time: The time at which merger occurs.\n \"\"\"\n if self[child].start_time != time:\n raise ValueError(\n f\"{child}'s start time must equal admixture time of {time}\"\n )\n # for parental populations, we check that their end time is <= merge time\n for parent in parents:\n if self[parent].end_time > time:\n raise ValueError(f\"deme {parent} has end time earlier than {time}\")\n # if any parent end times are more recent than merge time, we adjust the end\n # and remove epochs that extend beyond that merger time\n for parent in parents:\n if self[parent].end_time < time:\n while self[parent].epochs[-1].end_time < time:\n if self[parent].epochs[-1].start_time <= time:\n del self[parent].epochs[-1]\n else:\n self[parent].epochs[-1].end_time = time\n # set the ancestors and proportions of the child deme\n self[child].ancestors = parents\n self[child].proportions = proportions\n self.mergers.append(\n Merge(parents=parents, proportions=proportions, child=child, time=time)\n )\n\n def admix(self, *, parents, proportions, child, time):\n \"\"\"\n Add admixture event at a given time, where multiple parents contribute to\n a descendant deme, and the parent demes continue to exist beyond that time.\n\n :param parents: The list of IDs for the ancestral demes.\n :type parents: list of str\n :param proportions: Proportions of ancestral demes contributing to descendant.\n :type proportions: list of float\n :param str child: The ID of the descendant deme.\n :param float time: The time at which admixture occurs.\n \"\"\"\n if self[child].start_time != time:\n raise ValueError(\n f\"{child}'s start time must equal admixture time of {time}\"\n )\n # for parental populations, we check 
that their end time is <= admixture time\n for parent in parents:\n if self[parent].end_time > time:\n raise ValueError(f\"deme {parent} has end time earlier than {time}\")\n # set the ancestors and proportions of the child deme\n self[child].ancestors = parents\n self[child].proportions = proportions\n self.admixtures.append(\n Admix(parents=parents, proportions=proportions, child=child, time=time)\n )\n\n def get_demographic_events(self):\n \"\"\"\n Loop through successors/predecessors to add splits, branches, mergers,\n and admixtures to the deme graph. If a deme has more than one predecessor,\n then it is a merger or an admixture event, which we differentiate by end and\n start times of those demes. If a deme has a single predecessor, we check\n whether it is a branch (start time != predecessor's end time), or split.\n\n This is only used when we build a demography from a YAML file, since it\n uses the successors/predecessors that are determined by ancestor relationships.\n \"\"\"\n splits_to_add = {}\n for c, p in self.predecessors.items():\n if len(p) == 0:\n continue\n elif len(p) == 1:\n if self[c].start_time == self[p[0]].end_time:\n splits_to_add.setdefault(p[0], set())\n splits_to_add[p[0]].add(c)\n else:\n self.branch(parent=p[0], child=c, time=self[c].start_time)\n else:\n time_aligned = True\n for deme_from in p:\n if self[c].start_time != self[deme_from].end_time:\n time_aligned = False\n if time_aligned is True:\n self.merge(\n parents=self[c].ancestors,\n proportions=self[c].proportions,\n child=c,\n time=self[c].start_time,\n )\n else:\n self.admix(\n parents=self[c].ancestors,\n proportions=self[c].proportions,\n child=c,\n time=self[c].start_time,\n )\n for deme_from, demes_to in splits_to_add.items():\n self.split(\n parent=deme_from, children=list(demes_to), time=self[deme_from].end_time\n )\n\n def validate(self):\n \"\"\"\n Validates the demographic model.\n \"\"\"\n loads(dumps(self))\n\n def in_generations(self):\n \"\"\"\n Return a copy of the demes graph with times in units of generations.\n \"\"\"\n deme_graph = copy.deepcopy(self)\n deme_graph.time_units = \"generations\"\n generation_time = self.generation_time\n if generation_time is not None:\n deme_graph.generation_time = None\n for deme in deme_graph.demes:\n for epoch in deme.epochs:\n epoch.start_time /= generation_time\n epoch.end_time /= generation_time\n for migration in deme_graph.migrations:\n migration.start_time /= generation_time\n migration.end_time /= generation_time\n for pulse in deme_graph.pulses:\n pulse.time /= generation_time\n deme_graph.splits = []\n deme_graph.branches = []\n deme_graph.mergers = []\n deme_graph.admixtures = []\n deme_graph.get_demographic_events()\n return deme_graph\n\n def asdict(self):\n \"\"\"\n Return a dict representation of the deme graph.\n \"\"\"\n return attr.asdict(self)\n\n def asdict_compact(self):\n \"\"\"\n Return a dict representation of the deme graph, with default and\n implicit values removed.\n \"\"\"\n d = dict(\n description=self.description,\n time_units=self.time_units,\n )\n if self.generation_time is not None:\n d.update(generation_time=self.generation_time)\n if self.doi is not None:\n d.update(doi=self.doi)\n\n if self.selfing_rate is not None:\n d.update(selfing_rate=self.selfing_rate)\n if self.cloning_rate is not None:\n d.update(cloning_rate=self.cloning_rate)\n\n assert len(self.demes) > 0\n d.update(demes=dict())\n for deme in self.demes:\n deme_dict = dict()\n # add ancestors to deme if not None\n if deme.ancestors is not None:\n 
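
get_demographic_events above distinguishes the four event types purely from predecessor counts and start/end time alignment. A compact sketch of that decision rule, assuming the per-deme times are available as plain {deme_id: time} dicts:

def classify_event(child, parents, start_time, end_time):
    """Return the event type implied by ancestry, mirroring the rules above."""
    if len(parents) == 0:
        return None                      # root deme: no event
    if len(parents) == 1:
        p = parents[0]
        if start_time[child] == end_time[p]:
            return "split"               # child starts exactly when parent ends
        return "branch"                  # parent persists past the branch point
    if all(start_time[child] == end_time[p] for p in parents):
        return "merge"                   # every parent ends at the child's start
    return "admixture"                   # at least one parent lives on

print(classify_event("C", ["A", "B"], {"C": 50}, {"A": 50, "B": 10}))  # admixture
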
deme_dict.update(ancestors=deme.ancestors)\n if len(deme.ancestors) > 1:\n deme_dict.update(proportions=deme.proportions)\n if any([deme.start_time != self[a].end_time for a in deme.ancestors]):\n deme_dict.update(start_time=deme.start_time)\n else:\n # corner case of no ancestors but finite start time\n if math.isfinite(deme.start_time):\n deme_dict.update(start_time=deme.start_time)\n # add selfing and cloning rates, if not None\n if deme.selfing_rate is not None:\n if self.selfing_rate is None or (\n self.selfing_rate is not None\n and deme.selfing_rate != self.selfing_rate\n ):\n deme_dict.update(selfing_rate=deme.selfing_rate)\n if deme.cloning_rate is not None:\n if self.cloning_rate is None or (\n self.cloning_rate is not None\n and deme.cloning_rate != self.cloning_rate\n ):\n deme_dict.update(cloning_rate=deme.cloning_rate)\n\n assert len(deme.epochs) > 0\n e_list = []\n for j, epoch in enumerate(deme.epochs):\n e = dict()\n # end time required for epochs\n e.update(end_time=epoch.end_time)\n e.update(initial_size=epoch.initial_size)\n if epoch.final_size != epoch.initial_size:\n e.update(final_size=epoch.final_size)\n if epoch.size_function not in [\"constant\", \"exponential\"]:\n e.update(size_function=epoch.size_function)\n if epoch.selfing_rate is not None:\n if deme.selfing_rate is not None:\n if epoch.selfing_rate != deme.selfing_rate:\n e.update(selfing_rate=epoch.selfing_rate)\n elif self.selfing_rate is not None:\n if epoch.selfing_rate != self.selfing_rate:\n e.update(selfing_rate=epoch.selfing_rate)\n else:\n e.update(selfing_rate=epoch.selfing_rate)\n if epoch.cloning_rate is not None:\n if deme.cloning_rate is not None:\n if epoch.cloning_rate != deme.cloning_rate:\n e.update(cloning_rate=epoch.cloning_rate)\n elif self.cloning_rate is not None:\n if epoch.cloning_rate != self.cloning_rate:\n e.update(cloning_rate=epoch.cloning_rate)\n else:\n e.update(cloning_rate=epoch.cloning_rate)\n e_list.append(e)\n if len(e_list) > 1:\n # if more than one epoch, list all epochs\n deme_dict.update(epochs=e_list)\n else:\n # if a single epoch, don't list as under epochs\n deme_dict.update(initial_size=e_list[0][\"initial_size\"])\n if \"final_size\" in e_list[0]:\n if e_list[0][\"final_size\"] != e_list[0][\"initial_size\"]:\n deme_dict.update(final_size=e_list[0][\"final_size\"])\n if e_list[0][\"end_time\"] > 0:\n deme_dict.update(end_time=e_list[0][\"end_time\"])\n if deme.description is not None:\n deme_dict.update(description=deme.description)\n d[\"demes\"][deme.id] = deme_dict\n\n if len(self.migrations) > 0:\n m_dict = collections.defaultdict(list)\n for migration in self.migrations:\n m_dict[(migration.source, migration.dest)].append(\n dict(rate=migration.rate)\n )\n time_lo, time_hi = self.check_time_intersection(\n migration.source, migration.dest, None\n )\n if migration.end_time != time_lo:\n m_dict[(migration.source, migration.dest)][-1].update(\n end_time=migration.end_time\n )\n if migration.start_time != time_hi:\n m_dict[(migration.source, migration.dest)][-1].update(\n start_time=migration.start_time\n )\n # collapse into symmetric and asymmetric migrations\n m_symmetric = []\n m_asymmetric = []\n for (source, dest), m_list in m_dict.items():\n # check if there is equal, reverse migration over the same epoch\n if (dest, source) in m_dict:\n for m in m_list:\n no_symmetry = True\n for i, m_compare in enumerate(m_dict[(dest, source)]):\n if m == m_compare:\n m_symmetric.append(\n dict(demes=[source, dest], rate=m[\"rate\"])\n )\n if \"start_time\" in m:\n 
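
The epoch serialization above records only fields that differ from their implied defaults, and flattens a single epoch into the deme itself. A small sketch of that compaction rule on a hypothetical epoch dict:

def compact_epoch(epoch):
    """Drop implied values, mirroring asdict_compact's epoch handling."""
    e = {"end_time": epoch["end_time"], "initial_size": epoch["initial_size"]}
    if epoch["final_size"] != epoch["initial_size"]:
        e["final_size"] = epoch["final_size"]          # only real size changes are explicit
    if epoch["size_function"] not in ("constant", "exponential"):
        e["size_function"] = epoch["size_function"]    # the two default functions stay implicit
    return e

print(compact_epoch({"end_time": 0, "initial_size": 100,
                     "final_size": 100, "size_function": "constant"}))
# {'end_time': 0, 'initial_size': 100}
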
m_symmetric[-1][\"start_time\"] = m[\"start_time\"]\n if \"end_time\" in m:\n m_symmetric[-1][\"end_time\"] = m[\"end_time\"]\n # pop the m_compare so we don't repeat it\n m_dict[(dest, source)].remove(m_compare)\n no_symmetry = False\n break\n if no_symmetry:\n m_asymmetric.append(\n dict(source=source, dest=dest, rate=m[\"rate\"])\n )\n if \"start_time\" in m:\n m_asymmetric[-1][\"start_time\"] = m[\"start_time\"]\n if \"end_time\" in m:\n m_asymmetric[-1][\"end_time\"] = m[\"end_time\"]\n else:\n # all ms in m_list are asymmetric\n for m in m_list:\n m_asymmetric.append(\n dict(source=source, dest=dest, rate=m[\"rate\"])\n )\n if \"start_time\" in m:\n m_asymmetric[-1][\"start_time\"] = m[\"start_time\"]\n if \"end_time\" in m:\n m_asymmetric[-1][\"end_time\"] = m[\"end_time\"]\n migrations_out = {}\n if len(m_symmetric) > 0:\n migrations_out[\"symmetric\"] = m_symmetric\n if len(m_asymmetric) > 0:\n migrations_out[\"asymmetric\"] = m_asymmetric\n if len(migrations_out) > 0:\n d.update(migrations=migrations_out)\n\n if len(self.pulses) > 0:\n d.update(pulses=[attr.asdict(pulse) for pulse in self.pulses])\n\n return d\n","sub_path":"demes/demes.py","file_name":"demes.py","file_ext":"py","file_size_in_byte":57453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"519278378","text":"# -*- coding: utf-8 -*-\nimport json\nfrom django.forms import widgets\nfrom django.utils.safestring import mark_safe\n\n\nclass JSONMultiWidget(widgets.MultiWidget):\n \"\"\"Base class for MultiWidgets using a JSON field in database\"\"\"\n def decompress(self, value):\n values = value and json.loads(value) or {}\n for prefix in self.prefixes:\n values.setdefault(prefix, None)\n return values\n\n def value_from_datadict(self, data, files, name):\n result = dict((p, data.get(p) or None) for p in self.prefixes)\n return result\n\n def render(self, name, value, attrs=None):\n values = self.decompress(value)\n html = '
'\n for index, prefix in enumerate(self.prefixes):\n html += self.widgets[index].render(prefix, values.get(prefix), attrs)\n html += '
'\n return mark_safe(html)\n\n\nclass ExtraStylesWidget(JSONMultiWidget):\n \"\"\"\n Creates one or more independent text fields to keep extra styles applied to the\n corresponding HTML element.\n \"\"\"\n def __init__(self, styles):\n style_widgets = [widgets.TextInput({'placeholder': s}) for s in styles]\n super(ExtraStylesWidget, self).__init__(style_widgets)\n self.prefixes = styles[:]\n\n\nclass MultipleRadioButtonsWidget(JSONMultiWidget):\n \"\"\"\n Creates one or more independent rows of radio button widgets, each of which declares its own\n choices. choices shall be a tuple of tuples, each containing a name followed by a tuple\n of (value, label) choice pairs.\n \"\"\"\n def __init__(self, choices):\n if not choices or not isinstance(choices, (list, tuple)) or not isinstance(choices[0], tuple):\n raise AttributeError('choices must be list or tuple of tuples')\n radio_widgets = dict((key, widgets.RadioSelect(choices=ch)) for key, ch in choices)\n super(MultipleRadioButtonsWidget, self).__init__(radio_widgets.values())\n self.prefixes = radio_widgets.keys()\n\n\nclass MultipleCheckboxesWidget(widgets.CheckboxSelectMultiple):\n \"\"\"\n Creates a single row of checkbox widgets sharing one set of choices. choices shall be\n a tuple of (value, label) pairs; the selected values are stored as a JSON list.\n \"\"\"\n def __init__(self, choices):\n if not choices or not isinstance(choices, (list, tuple)) or not isinstance(choices[0], tuple):\n raise AttributeError('choices must be list or tuple of tuples')\n super(MultipleCheckboxesWidget, self).__init__(choices=choices)\n self.labels = [choice[0] for choice in choices]\n\n def render(self, name, value, attrs=None):\n values = value and json.loads(value) or []\n values += [None] * (len(self.labels) - len(values))\n html = '
'\n html += super(MultipleCheckboxesWidget, self).render(name, values, attrs)\n html += '
'\n return mark_safe(html)\n","sub_path":"cmsplugin_bootstrap/change_form_widgets.py","file_name":"change_form_widgets.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"475952370","text":"#!/Library/Frameworks/Python.framework/Versions/3.5/bin/python3\n\nimport os\nimport re\nimport subprocess\nimport urllib3\n\nscript_path = os.path.abspath(__file__)\nfolder_name = 'ff7'\nfolder = os.path.join(os.path.split(script_path)[0], folder_name)\nos.makedirs(folder, exist_ok=True)\n\ndef relative_path(path):\n return os.path.join(folder, path)\n\nhttp = urllib3.PoolManager()\n\nalbum_url = 'http://finalfantasy.wikia.com/wiki/Final_Fantasy_VII:_Original_Soundtrack'\nalbum_page = http.request('GET', album_url).data.decode('utf-8')\nsongs = re.findall(r'\"(.+?)\" ?<', album_page)\nfor i, match in enumerate(songs):\n for link, replacement in re.findall(r'((.+?))', match):\n match = match.replace(link, replacement)\n songs[i] = match\n\nalbum_url = 'http://www.ffmages.com/final-fantasy-vii/original-soundtrack/'\nalbum_page = http.request('GET', album_url).data.decode('utf-8')\nmatches = re.findall(r'', album_page)\ndisk_lengths = [23, 21, 23, 18]\ntrack = 1\ndisk = 1\nfor abs_track, match in enumerate(matches):\n if track > disk_lengths[0]:\n track = 1\n disk += 1\n disk_lengths.pop(0)\n filename = '{} {}.mp3'.format(str(abs_track + 1).zfill(2), songs[abs_track].replace('/', ':'))\n filepath = relative_path(filename)\n print('Downloading \"{}\"... '.format(songs[abs_track]), end='', flush=True)\n with open(relative_path(filename), 'wb') as f:\n mp3_url = 'http://www.ffmages.com' + match\n f.write(http.request('GET', mp3_url).data)\n subprocess.run([\n 'mid3v2',\n '--song=' + songs[abs_track],\n '--artist=Square',\n '--album=Final Fantasy VII',\n '--TCOM', 'Nobuo Uematsu',\n '--year=1997',\n '--track={}/{}'.format(abs_track + 1, 85),\n '--TPOS', '{}/{}'.format(disk, 4),\n filepath],\n stdout=subprocess.DEVNULL)\n print('done.')\n track += 1\n","sub_path":"ff7.py","file_name":"ff7.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"84898315","text":"import sys\nfrom PyQt5.QtWidgets import QApplication, QWidget\nfrom PyQt5.QtGui import QIcon\n\n\n\nclass Example(QWidget):\n def __init__(self):\n super().__init__()\n self.initUI() # 界面绘制交给InitUi方法\n def initUI(self):\n # 设置窗口的位置和大小\n self.setGeometry(300, 300, 600, 400)\n # 设置窗口的标题\n self.setWindowTitle('Icon')\n # 设置窗口的图标,引用当前目录下的web.png图片\n self.setWindowIcon(QIcon('web.png'))\n # 显示窗口\n self.show()\nif __name__ == '__main__':\n # 创建应用程序和对象\n app = QApplication(sys.argv)\n w = QWidget()\n w.resize(300, 200)\n w.move(300, 300)\n w.setWindowTitle('第一个qt桌面应用')\n w.show()\n #ex = Example()\n sys.exit(app.exec_())","sub_path":"mymain2.py","file_name":"mymain2.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"163956389","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 24 22:41:24 2016\n\n@author: Ratishankar\n\"\"\"\nimport numpy as np\nfrom astropy.stats import sigma_clip\nfrom photutils import aperture_photometry\nfrom photutils import CircularAperture\nimport fitg as f\nclass datared:\n \"\"\" \n This is actual data Reduction class which performs data reduction\n over the given data cube \n \"\"\"\n def __init__(self,image):\n self.data=image\n\n def 
bg_sub(self,exrad=0):\n \n \"\"\"\n This is background subtraction method to remove \n unwanted cosmic signal from each image frame\n -----------------------------\n Input: datacube with image Frames\n exrad:radius around max value to be excluded from background\n subraction\n Output:Frame with mean Background Subtracted\n \"\"\"\n data=self.data\n for i in range(len(data)):\n a,b=np.where(data[i]==data[i].max())\n row,col=data[i].shape\n r=exrad\n y,x = np.ogrid[-a:row-a,-b:col-b]\n mask = x*x + y*y <= r*r\n data[i][~mask] -=np.mean(data[i][~mask]) \n self.data=data\n return data\n \n def sigma_clipd(self,sigma=3,iters=1):\n \n \"\"\"\n This is sigma cliping method to remove \n outlier pixel value and replace them with\n median pixel values\n ---------------------------\n Paramters:\n sigma:coeeficient of sigma \n iters:Numbers of iterations of sigma clipping\n \n OUTPUT:\n Returns sigma clipped data cube \n \n \"\"\"\n data=self.data\n med=np.median(data,axis=0)\n for i in range(iters):\n cd=sigma_clip(data,sigma=sigma,iters=1,axis=0)\n for n in range(len(data)):\n cd[n][cd[n].mask]=med[cd[n].mask]\n \n self.data=cd.data\n return cd.data \n \n def Gauss2Dfit(self,pixbox=7):\n \"\"\"\n Gaussian 2D Fitting over the Pixel box around \n brightest pixel in each Frame of datacube.\n ------------------------------------------\n Parameters:pixbox->side of box around brightest pixel\n \n Outputs:list of Gaussian 2d Fitted peak values\n \n \"\"\"\n side=pixbox/2\n data=self.data\n peak=[]\n fit2dG=f.fitg()\n for i in range(len(data)):\n x,y=np.where(data[i]==data[i].max())\n mdata=data[i][x-side:x+side,y-side:y+side]\n peak.append(fit2dG.fitgaussian(mdata)[0])\n return peak\n \n def aper_photometry(self,sky_annulus):\n \"\"\"\n This method performs aperture photmetry \n and store the result in table\n -----------------------------\n Parameters:\n sky_annulus\n \n Output:\n Result Table\n \n \"\"\"\n data=self.data\n table=[]\n for n in range(len(data)):\n x,y=np.where(data[n]==data[n].max())\n position=[x,y]\n apertures = CircularAperture(position,sky_annulus)\n table.append(aperture_photometry(data[n], apertures))\n \n return table","sub_path":"dataredu.py","file_name":"dataredu.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"313845302","text":"import sys\n\nr, c, a = [int(k) for k in input().split()]\n\n\ndef correct_print_2d(arr):\n arr = arr[:]\n for i, row in enumerate(arr):\n nrow = ''\n for c in row:\n if c == \"#\":\n nrow += \"%3s\" % c\n else:\n nrow += \"%3d\" % c\n arr[i] = nrow\n print(\"\\n\".join(arr), \"\\n\", file=sys.stderr)\n\n\ndef build_map(tx, ty, cx, cy, askii_map):\n global r, c\n bmap = [[-1 for i in range(c)] for i in range(r)]\n for i in range(r):\n for j in range(c):\n if askii_map[i][j] in \"?#\":\n bmap[i][j] = \"#\"\n\n opened = [(tx, ty)]\n bmap[ty][tx] = 0\n found_key = False\n point = 0\n print(tx, ty, file=sys.stderr)\n while 1:\n to = []\n for x, y in opened:\n point = bmap[y][x]\n if y + 1 < r and bmap[y + 1][x] == -1:\n bmap[y + 1][x] = point + 1\n to += [(x, y + 1)]\n if y - 1 >= 0 and bmap[y - 1][x] == -1:\n bmap[y - 1][x] = point + 1\n to += [(x, y - 1)]\n if x + 1 < c and bmap[y][x + 1] == -1:\n bmap[y][x + 1] = point + 1\n to += [(x + 1, y)]\n if x - 1 >= 0 and bmap[y][x - 1] == -1:\n bmap[y][x - 1] = point + 1\n to += [(x - 1, y)]\n opened = to[:]\n if len(opened) == 0:\n break\n if (cx, cy) in opened:\n found_key = True\n break\n 
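
bg_sub above excludes a disc of radius exrad around the brightest pixel before subtracting the mean background. The masking trick in isolation, as a sketch on a toy frame:

import numpy as np

frame = np.arange(25, dtype=float).reshape(5, 5)   # toy image; peak at (4, 4)
a, b = np.unravel_index(frame.argmax(), frame.shape)
rows, cols = frame.shape
r = 1                                              # exclusion radius

y, x = np.ogrid[-a:rows - a, -b:cols - b]          # coordinates relative to the peak
mask = x * x + y * y <= r * r                      # True inside the disc
frame[~mask] -= frame[~mask].mean()                # subtract background outside it

print(mask.sum())          # pixels protected from subtraction: 3
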
correct_print_2d(bmap)\n return bmap, found_key, point\n\n\ndef build_path(tx, ty, cx, cy, bmap, askii_map):\n global r, c\n askii_map = askii_map[:]\n path = []\n x = cx\n y = cy\n while x != tx or y != ty:\n point = bmap[y][x]\n if y + 1 < r and bmap[y + 1][x] == point - 1:\n path += [\"UP\"]\n y += 1\n askii_map[y] = askii_map[y][:x] + \"↑\" + askii_map[y][x + 1:]\n elif y - 1 >= 0 and bmap[y - 1][x] == point - 1:\n path += [\"DOWN\"]\n y -= 1\n askii_map[y] = askii_map[y][:x] + \"↓\" + askii_map[y][x + 1:]\n elif x + 1 < c and bmap[y][x + 1] == point - 1:\n path += [\"LEFT\"]\n x += 1\n askii_map[y] = askii_map[y][:x] + \"←\" + askii_map[y][x + 1:]\n elif x - 1 >= 0 and bmap[y][x - 1] == point - 1:\n path += [\"RIGHT\"]\n x -= 1\n askii_map[y] = askii_map[y][:x] + \"→\" + askii_map[y][x + 1:]\n path = path[::-1]\n\n print(\" -> \".join(path), file=sys.stderr)\n\n return path\n\n\ndef mark_box(marked_map, dot):\n dx, dy = dot\n for y in range(-1, 2):\n for x in range(-1, 2):\n if dx + x in range(0, c) and dy + y in range(0, r):\n marked_map[dy + y][dx + x] = 1\n\n\ndef build_map_about_marks(tx, ty, askii_map, marked_map):\n global r, c\n bmap = [[-1 for i in range(c)] for i in range(r)]\n for i in range(r):\n for j in range(c):\n if askii_map[i][j] in \"?#C\":\n bmap[i][j] = \"#\"\n\n opened = [(tx, ty)]\n bmap[ty][tx] = 0\n found_key = False\n point = 0\n print(tx, ty, file=sys.stderr)\n while 1:\n to = []\n for x, y in opened:\n point = bmap[y][x]\n if y + 1 < r and bmap[y + 1][x] == -1:\n bmap[y + 1][x] = point + 1\n to += [(x, y + 1)]\n if y - 1 >= 0 and bmap[y - 1][x] == -1:\n bmap[y - 1][x] = point + 1\n to += [(x, y - 1)]\n if x + 1 < c and bmap[y][x + 1] == -1:\n bmap[y][x + 1] = point + 1\n to += [(x + 1, y)]\n if x - 1 >= 0 and bmap[y][x - 1] == -1:\n bmap[y][x - 1] = point + 1\n to += [(x - 1, y)]\n opened = to[:]\n if len(opened) == 0:\n break\n for x, y in opened:\n if marked_map[y][x] == 0:\n point = (x, y)\n found_key = True\n break\n if found_key:\n break\n correct_print_2d(bmap)\n return bmap, found_key, point\n\n\ndef can_go_back():\n bmap, found_key, point = build_map(tx, ty, cx, cy, askii_map)\n return found_key and point <= a\n\n\nmarked_map = [[0 for _ in range(c)] for _ in range(r)]\n\ngo_to_c = False\ngo_to_t = False\n\nwhile True:\n ky, kx = [int(i) for i in input().split()]\n find_q = False\n cx = cy = qx = qy = tx = ty = -1\n askii_map = []\n for i in range(r):\n row = input()\n askii_map += [row]\n if \"C\" in row:\n cx = row.index(\"C\")\n cy = i\n if \"T\" in row:\n tx = row.index(\"T\")\n ty = i\n\n mark_box(marked_map, (kx, ky))\n\n bmap, found_key, l = build_map(cx, cy, tx, ty, askii_map)\n if found_key and l <= a:\n go_to_c = True\n\n if kx == cx and ky == cy:\n go_to_t = True\n\n if go_to_t:\n bmap, found_key, point = build_map(kx, ky, tx, ty, askii_map)\n path = build_path(kx, ky, tx, ty, bmap, askii_map)\n elif go_to_c:\n bmap, found_key, point = build_map(kx, ky, cx, cy, askii_map)\n path = build_path(kx, ky, cx, cy, bmap, askii_map)\n else:\n bmap, found_key, point = build_map_about_marks(kx, ky, askii_map, marked_map)\n x, y = point\n path = build_path(kx, ky, x, y, bmap, askii_map)\n\n print(path[0])\n","sub_path":"codingame/The Labyrinth.py","file_name":"The Labyrinth.py","file_ext":"py","file_size_in_byte":5304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"115068761","text":"import sys\n\n#gather imformation about the LFSR\nprint(\"Enter Poly\")\npoly = sys.stdin.readline()\npoly = 
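
build_map above is a breadth-first flood fill that stamps every cell with its distance from the start, and build_path then walks that field downhill. The same idea on a toy grid, using a deque frontier instead of rebuilding the open list each wave:

from collections import deque

grid = ["....",
        ".##.",
        "...."]
R, C = len(grid), len(grid[0])

dist = [[-1] * C for _ in range(R)]
dist[0][0] = 0
frontier = deque([(0, 0)])
while frontier:
    x, y = frontier.popleft()
    for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
        nx, ny = x + dx, y + dy
        if 0 <= nx < C and 0 <= ny < R and grid[ny][nx] != "#" and dist[ny][nx] == -1:
            dist[ny][nx] = dist[y][x] + 1          # one step farther than its parent
            frontier.append((nx, ny))

print(dist[2][3])   # shortest step count from (0, 0) to the far corner: 5
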
poly.replace('\\n', '').split(' ')\npoly_list = []\nfor coefient in poly:\n\tpoly_list.append(int(coefient))\npoly = poly_list\n\t\nprint(\"Enter inital value in bin\")\ninit_val = sys.stdin.readline().replace('\\n','')\nregisters = [0] * int(poly[len(poly) - 1])\n\nfor x in range(len(init_val)):\n\tregisters[x] = (int(init_val[x]))\n\nprint(\"Enter number of output values\")\nrounds = int(sys.stdin.readline())\n\nprint()\nprint(\"Inital values:\")\nprint(registers)\nprint(\"Outputs:\")\nfor i in range(rounds):\n\toutput = registers[len(registers) - 1]\n\t#print(i+1, registers, output)\n\tprint(output, end = '')\n\tnext = 0\n\t#add the values in the registers\n\tfor coefient in poly[0:len(poly) - 1]:\n\t\tnext += registers[(len(registers)-1) - coefient] % 2\n\t\n\t#shift the register values\n\tregisters = registers[-1:] + registers[:-1]\n\t#for j in range(len(registers)-2):\n\t#\tregisters[j] = registers[j+1]\n\t\n\t#set the new value\n\tregisters[0] = next % 2\n\t\nprint()\n","sub_path":"lfsr.py","file_name":"lfsr.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"215473417","text":"# coding = utf-8\n\"\"\"\ncreate on : 2017/09/24\nproject name : NLP_100\nfile name : problem_no_30 \n\nThis problem using neko.txt\nThis file is available at \"http://www.cl.ecei.tohoku.ac.jp/nlp100/\".\nThis file NOT include this repository.\nIf you need file, please get above web site.\n\npreparation : 夏目漱石の小説『吾輩は猫である』の文章(neko.txt)を\n MeCabを使って形態素解析し,その結果をneko.txt.mecabという\n ファイルに保存せよ.このファイルを用いて,\n 以下の問に対応するプログラムを実装せよ.\n\nproblem : 形態素解析結果(neko.txt.mecab)を読み込むプログラムを実装せよ.\n ただし,各形態素は表層形(surface),基本形(base),品詞(pos),\n 品詞細分類1(pos1)をキーとするマッピング型に格納し,\n 1文を形態素(マッピング型)のリストとして表現せよ.\n 第4章の残りの問題では,ここで作ったプログラムを活用せよ.\n\n\"\"\"\nimport glob\nimport json\n\nfrom pprint import pprint\n\nfrom tqdm import tqdm\nimport MeCab\n\nNEKO_TXT_MECAB_PATH = \"./neko.txt.mecab\"\nSURFACE = \"surface_form\"\nFEATURE_LIST = [\"part_of_speech\", # 品詞\n \"part_of_speech_subcategory1\", # 品詞細分類1\n \"part_of_speech_subcategory2\", # 品詞細分類2\n \"part_of_speech_subcategory3\", # 品詞細分類3\n \"conjugation_form\", # 活用型\n \"conjugation\", # 活用形\n \"lexical_form\", # 原形\n \"yomi\", # 読み\n \"pronunciation\"] # 発音\n\n\ndef morphological_analysis_parse(surface, feature, nlp):\n \"\"\" parse morphological analysis result\n\n :param surface: surface form string\n :param feature: feature contents string\n :param nlp: nlp 100 or no boolean\n :return: morphological analysis result dictionary\n \"\"\"\n\n if surface:\n morph_dic = {SURFACE: surface}\n feature_split = feature.split(\",\")\n\n for f_key, f_value in zip(FEATURE_LIST, feature_split):\n morph_dic[f_key] = f_value\n\n else:\n morph_dic = {}\n\n if nlp is True and morph_dic:\n nlp_morph_dic = {\"surface\": morph_dic[\"surface_form\"],\n \"base\": morph_dic[\"lexical_form\"],\n \"pos\": morph_dic[\"part_of_speech\"],\n \"pos1\": morph_dic[\"part_of_speech_subcategory1\"]}\n\n return nlp_morph_dic\n\n return morph_dic\n\n\ndef morphological_analysis(tagger, sentence, nlp=False):\n \"\"\" morphological analysis given sentence\n\n :param tagger: MeCab tagger object\n :param sentence: given sentence string\n :param nlp: nlp 100 or no boolean\n :return: morphological analysis result list by sentence\n \"\"\"\n\n morpheme = tagger.parseToNode(sentence)\n\n morpheme_result = []\n\n while morpheme:\n surface = morpheme.surface\n feature = morpheme.feature\n\n morpheme_dic = 
morphological_analysis_parse(surface, feature, nlp)\n\n if morpheme_dic:\n morpheme_result.append(morpheme_dic)\n\n morpheme = morpheme.next\n\n return morpheme_result\n\n\ndef morphological_analysis_main(text, nlp):\n \"\"\" morphological analysis given text\n\n :param text: given text List\n :param nlp: nlp 100 or no boolean\n :return: morphological analysis result list by text\n \"\"\"\n\n tagger = MeCab.Tagger()\n\n neko_morph_list = []\n\n for t in tqdm(text):\n morph = morphological_analysis(tagger, t, nlp=nlp)\n neko_morph_list.append(morph)\n\n return neko_morph_list\n\n\ndef neko_morpheme():\n \"\"\" Morphological analysis data is given by neko.txt,\n and save json file\n\n :return: result message\n \"\"\"\n\n neko_txt_mecab = glob.glob(NEKO_TXT_MECAB_PATH)\n\n if neko_txt_mecab:\n return \"already exists morphological_analysis result\"\n\n neko_path = \"./neko.txt\"\n with open(neko_path, mode=\"r\", encoding=\"utf-8\") as f:\n original_text = f.read()\n\n text_split = original_text.split(\"\\n\")\n\n text = [t.strip(\" \") for t in text_split if t]\n\n neko_morph_list = morphological_analysis_main(text, nlp=True)\n\n with open(NEKO_TXT_MECAB_PATH, mode=\"w\", encoding=\"utf-8\") as f:\n json.dump(neko_morph_list, f, ensure_ascii=False)\n\n return \"morphological_analysis complete\"\n\n\ndef get_neko_morpheme_list():\n \"\"\" load morphological analysis data\n\n :return: morphological analysis result list data\n \"\"\"\n\n with open(NEKO_TXT_MECAB_PATH, mode=\"r\", encoding=\"utf-8\") as f:\n neko_morpheme_list = json.load(f)\n\n return neko_morpheme_list\n\n\ndef problem_no_30():\n \"\"\" morphological analysis text and store List and result dict data\n\n :return: morphological analysis result list data\n \"\"\"\n print(neko_morpheme())\n morpheme_result = get_neko_morpheme_list()\n\n return morpheme_result\n\n\nif __name__ == \"__main__\":\n pprint(problem_no_30())\n","sub_path":"codes/chapter_04/problem_no_30.py","file_name":"problem_no_30.py","file_ext":"py","file_size_in_byte":5010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"631382487","text":"# -*- coding: utf-8 -*\r\nimport os\r\nimport time\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy import fftpack\r\nfrom scipy import signal\r\nimport glob\r\nimport sys\r\n\r\ncount = 0\r\nmode = 0\r\nplt.close('all')\r\nprint(\"正常、異常、ちょっと異常データのどちらですか?\")\r\nprint(\"正常:1 異常:2 ちょっと異常:3\")\r\nmode = input(\">>> \")\r\nif int(mode) == 1:\r\n os.chdir(\"test_normal_fft\")\r\n input_file_list = glob.glob('../test_normal/*.csv')\r\nelif int(mode) == 2:\r\n os.chdir(\"test_abnormal_fft\")\r\n input_file_list = glob.glob('../test_abnormal/*.csv')\r\nelif int(mode) == 3:\r\n os.chdir(\"test_abnormal2_fft\")\r\n input_file_list = glob.glob('../test_abnormal2/*.csv')\r\nelse:\r\n print(\"入力したモードがありません\")\r\n sys.exit(1)\r\nfor filename in input_file_list:\r\n with open (filename, 'r') as input:\r\n (time, data) = np.loadtxt(filename,unpack=True, delimiter=\",\", skiprows = 1,usecols = (1,1))\r\n\r\n fs = 10000.0 # サンプリング周波数\r\n f,t,Sxx = signal.spectrogram(data, fs, nperseg=512)\r\n print(filename)\r\n plt.figure()\r\n plt.pcolormesh(t,f,Sxx,vmax=1e-6)\r\n plt.xlim([0,2.1])\r\n plt.xlabel(u\"Time [sec]\")\r\n plt.ylabel(u\"Freq [Hz]\")\r\n plt.colorbar()\r\n #plt.show()\r\n count = count + 1\r\n plt.savefig(str(count) + '.png')\r\n 
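
The loop above hands each CSV trace to scipy.signal.spectrogram with fs=10000 and nperseg=512. The core call in isolation, run on a synthetic sweep so it works without the CSV files:

import numpy as np
from scipy import signal

fs = 10000.0                                     # sampling rate, as in the script
t = np.arange(0, 2.0, 1 / fs)
data = np.sin(2 * np.pi * (50 + 200 * t) * t)    # synthetic frequency sweep

f, tt, Sxx = signal.spectrogram(data, fs, nperseg=512)
print(Sxx.shape)    # (frequency bins, time segments); 512 // 2 + 1 = 257 bins
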
plt.close('all')\r\n","sub_path":"test-data-fft-csv.py","file_name":"test-data-fft-csv.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"98110038","text":"import numpy as np\n\ndef mag(flux):\n mag = 25-2.5*np.log10(flux)\n return(mag)\n\ndef color(fluxb,fluxr):\n color = -2.5*np.log10(fluxb/fluxr)\n return(color)\n\ndef interpolate_color(cat):\n '''\n Input:\n 3dhst catalog\n \n Functionality:\n Take 3dhst full catalog (used to generate models) and \n calculated the upper and lower bound of color (f125w-f160w)\n for an assortment of redshift bins\n \n Return:\n upper and lower color bound to be interpolated and applied to \n grism sample\n \n '''\n mag_140 = Column(25 - 2.5*np.log10(np.maximum(cat['f_F140W'], 1e-4)),name = 'mag_F140W')\n mag_160 = Column(25 - 2.5*np.log10(np.maximum(cat['f_F160W'], 1e-4)),name = 'mag_F160W')\n mag_125 = Column(25 - 2.5*np.log10(np.maximum(cat['f_F125W'], 1e-4)),name = 'mag_F125W')\n color = Column((mag_125 - mag_160),name = 'color')\n \n clip = (cat['star_flag'] != 1) & (cat['use_phot'] == 1)\n clip &= (mag_160 > 0) & (mag_160 < 28) & (mag_125 > 0) & (mag_125 < 28) & np.isfinite(cat['lmass']) & (cat['lmass']>9)\n clip &= (cat['z_peak'] > 0.02) & (cat['star_flag'] != 1) & (cat['use_phot'] == 1) #& (cat['z_peak']<1.875)\n\n cat_interp = cat[clip]\n cat_interp.add_column(mag_160[clip], index=0)\n cat_interp.add_column(mag_140[clip], index=0)\n cat_interp.add_column(mag_125[clip], index=0)\n cat_interp.add_column(color[clip], index=0)\n\n #print(len(cat_interp))\n\n cat_interp['colorf125'] = cat_interp['mag_F125W']\n # Bin uvista data and calculate hmag cut at each bin\n bins = np.arange(0.25,2,0.25)\n inds = np.digitize(cat_interp['z_peak'],bins+.125)\n \n color_up = np.zeros(7)\n color_low = np.zeros(7)\n for i in (np.arange(7)):\n zslice = cat_interp[inds==i]\n #print(np.median(zslice['z_peak']))\n color_up[i],color_low[i] = np.percentile(zslice['color'],[97,3])\n\n # Check out the hmag cuts\n\n plt.scatter(bins,color_up)\n plt.scatter(bins,color_low)\n plt.xlabel('z_peak')\n plt.ylabel('color')\n #plt.savefig('../figures/uvista_hmag_cut.png')\n plt.show()\n \n \n return(bins,color_up,color_low)\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"394182797","text":"import json\nimport random\nimport time\nfrom urllib.request import urlopen\n\nimport pandas as pd\nimport pymysql\nimport requests\n\n\n# DB 연결\ndef connect_db():\n try:\n conn = pymysql.connect(\n user = \"admin\",\n passwd = \"1q2w3e4r!\",\n host = \"ott-service-project.cbmthuyhcguk.ap-northeast-2.rds.amazonaws.com\",\n port = 3306,\n database = \"ott-service\",\n charset='utf8'\n )\n # cur = conn.cursor()\n return conn\n\n except Exception as e:\n return print(e)\n\n\n\n# CONNECT DB\ndb = connect_db()\n\n\n# 전체 데이터 받아놓기 \nSHOW_CONTENTS = \"SELECT * FROM contents\"\ncontents = pd.read_sql(SHOW_CONTENTS, db)\n\n\n# 헤더 정보가 없으면 에러가 나는 경우가 있어서 미리 넣었습니다.\n# 윈도우의 경우 여기서 에러가 난다면 자신의 헤더 정보로 바꿔주세요.\nheaders = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'}\n\n\n# 이 크롤러 파일과 같은 위치에 json 파일을 둡니다.\nFILE_PATH = 'jw_ott_dataset.json'\nwith open(FILE_PATH, 'r') as json_file:\n raw_data = json.load(json_file)\n\nids = raw_data.keys()\n\n\ndata = dict()\nerror = dict()\n\n# 
Just Watch image query \nfor id in ids:\n \n try:\n\n if raw_data[id]['category'] == 'TV 프로그램' : \n ctgr = 'show'\n elif raw_data[id]['category'] == '영화':\n ctgr = 'movie'\n\n BASE_URL = f'https://apis.justwatch.com/content/titles/{ctgr}/{id}/locale/ko_KR?language=ko'\n\n \n \n try:\n time.sleep(random.uniform(0.1, 0.3))\n raw_text = requests.get(BASE_URL)\n raw_json = raw_text.json()\n img_id = raw_json['poster'].split('/')[2]\n\n img_url = f'https://images.justwatch.com/poster/{img_id}/s332'\n\n\n # img 폴더를 미리 만들어두세요.\n with urlopen(img_url) as f:\n with open ('./img/' + 'J_' + f'{id}' + '.jpg', 'wb') as h:\n img = f.read()\n h.write(img)\n \n data[id] = img_url\n\n except Exception as e:\n # 에러가 나서 다운로드가 끊겼다면 에러 메시지를 보시고 끊긴 지점을 알려주세요.\n print(id)\n print(e)\n error[id] = BASE_URL\n \n\n except Exception as e:\n print(e)\n\n\nsave_file_path = f'./jw_imgurl.json'\n\nwith open(save_file_path, 'w') as outfile:\n json.dump(data, outfile, indent=4, ensure_ascii = False)\n \nprint('wrote:',save_file_path)\n\n\nerror_file_path = f'./jw_error.json'\nwith open(error_file_path, 'w') as outfile:\n json.dump(error, outfile, indent=4, ensure_ascii = False)\n \nprint('wrote:',error_file_path)\n","sub_path":"data/crawling/justwatch/jw_img_crawler.py","file_name":"jw_img_crawler.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"295962415","text":"import torch\nfrom skimage.io import imread\nimport numpy as np\nimport os\nimport cv2\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\n\nclass KvasirDistillationDataset(torch.utils.data.Dataset):\n def __init__(self, img_paths, mask_paths, softlabel_paths, img_size, transform=None, type=\"train\"):\n self.img_paths = img_paths\n self.mask_paths = mask_paths\n self.softlabel_paths = softlabel_paths\n self.img_size = img_size\n self.transform = transform\n self.type = type\n\n def __len__(self):\n return len(self.img_paths)\n\n def __getitem__(self, idx):\n img_path = self.img_paths[idx]\n mask_path = self.mask_paths[idx]\n softlabel_path = self.softlabel_paths[idx]\n\n image_ = np.array(Image.open(img_path).convert(\"RGB\"))\n mask = np.array(Image.open(mask_path).convert(\"L\"))\n softlabel = np.array(Image.open(softlabel_path).convert(\"L\"))\n\n augmented = self.transform(image=image_, mask=mask, softlabel = softlabel)\n image = augmented[\"image\"]\n mask = augmented[\"mask\"]\n softlabel = augmented[\"softlabel\"]\n softlabel = softlabel/255\n mask_resize = mask\n mask = mask / 255\n\n if self.type == \"train\":\n mask = cv2.resize(mask, (self.img_size, self.img_size))\n softlabel = cv2.resize(softlabel, (self.img_size, self.img_size))\n elif self.type == \"val\":\n mask_resize = cv2.resize(mask, (self.img_size, self.img_size))\n mask_resize = mask_resize[:, :, np.newaxis]\n\n mask_resize = mask_resize.astype(\"float32\")\n mask_resize = mask_resize.transpose((2, 0, 1))\n\n image = cv2.resize(image, (self.img_size, self.img_size))\n image = image.astype(\"float32\") / 255\n image = image.transpose((2, 0, 1))\n\n mask = mask[:, :, np.newaxis]\n mask = mask.astype(\"float32\")\n mask = mask.transpose((2, 0, 1))\n\n\n softlabel = softlabel[:, :, np.newaxis]\n softlabel = softlabel.astype(\"float32\")\n softlabel = softlabel.transpose((2, 0, 1))\n # print(mask.shape,softlabel.shape)\n\n if self.type == \"train\":\n return np.asarray(image), np.asarray(mask), np.asarray(softlabel)\n\n elif self.type == \"test\":\n return (\n np.asarray(image),\n 
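
__getitem__ above converts images from HWC uint8 to CHW float32 in [0, 1] and adds a channel axis to masks. That normalization step on its own, with random stand-in arrays:

import numpy as np

image = np.random.randint(0, 256, (352, 352, 3), dtype=np.uint8)
mask = np.random.randint(0, 2, (352, 352), dtype=np.uint8)

image = image.astype("float32") / 255          # scale to [0, 1]
image = image.transpose((2, 0, 1))             # HWC -> CHW for PyTorch

mask = mask[:, :, np.newaxis].astype("float32")
mask = mask.transpose((2, 0, 1))               # (1, H, W)

print(image.shape, mask.shape)                 # (3, 352, 352) (1, 352, 352)
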
np.asarray(mask),\n os.path.basename(img_path),\n np.asarray(image_),\n )\n else:\n return (\n np.asarray(image),\n np.asarray(mask),\n np.asarray(mask_resize),\n )\n\n\nif __name__ == \"__main__\":\n import albumentations as al\n from albumentations.augmentations import transforms\n from albumentations.core.composition import Compose, OneOf\n\n from glob import glob\n\n train_img_paths = []\n train_mask_paths = []\n train_data_path = [\"data/kvasir-seg/TrainDataset\"]\n for i in train_data_path:\n train_img_paths.extend(glob(os.path.join(i, \"images\", \"*\")))\n train_mask_paths.extend(glob(os.path.join(i, \"masks\", \"*\")))\n train_img_paths.sort()\n train_mask_paths.sort()\n\n transforms = al.Compose(\n [\n transforms.RandomRotate90(),\n transforms.Flip(),\n transforms.HueSaturationValue(),\n transforms.RandomBrightnessContrast(),\n transforms.Transpose(),\n OneOf(\n [\n transforms.RandomCrop(220, 220, p=0.5),\n transforms.CenterCrop(220, 220, p=0.5),\n ],\n p=0.5,\n ),\n # transforms.Resize(352,352),\n # transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),\n ],\n p=0.7,\n )\n dataset = KvasirDataset(\n train_img_paths, train_mask_paths, 352, transform=transforms, type=\"train\"\n )\n\n fig, ax = plt.subplots(1, 2, figsize=(10, 9))\n image = dataset[0][0].transpose((1, 2, 0))\n mask = dataset[0][1].transpose((1, 2, 0))\n\n ax[0].imshow(image)\n ax[1].imshow(mask)\n\n np.histogram(image)\n","sub_path":"dataloader/kvasir_distillation.py","file_name":"kvasir_distillation.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"621828764","text":"\"\"\"\nTest automatic layout of multiple viewboxes using Grid.\n\n\n\"\"\"\nfrom vispy import scene\nfrom vispy import app\nimport numpy as np\n\ncanvas = scene.SceneCanvas(close_keys='escape')\ncanvas.size = 600, 600\ncanvas.show()\n\ngrid = scene.widgets.Grid(canvas.scene)\n\n\n# Ensure that grid fills the entire canvas, even after resize.\n@canvas.events.resize.connect\ndef update_grid(event=None):\n global grid, canvas\n grid.size = canvas.size\n print(canvas.size)\n\nupdate_grid()\n\nb1 = grid.add_view(row=0, col=0, col_span=2)\nb1.scene.camera = scene.cameras.TwoDCamera()\nb1.scene.camera.transform.scale = (10, 5)\nb1.scene.camera.transform.translate = (5, 0)\nb1.border = (1, 0, 0, 1)\nb1.preferred_clip_method = 'viewport'\n\nb2 = grid.add_view(row=1, col=0)\nb2.scene.camera = scene.cameras.TwoDCamera()\nb2.scene.camera.transform.scale = (10, 5)\nb2.scene.camera.transform.translate = (-2, 0)\nb2.border = (1, 0, 0, 1)\nb2.preferred_clip_method = 'viewport'\n\nb3 = grid.add_view(row=1, col=1)\nb3.scene.camera = scene.cameras.TwoDCamera()\nb3.scene.camera.transform.scale = (5, 5)\nb3.scene.camera.transform.translate = (0, 0)\nb3.border = (1, 0, 0, 1)\nb3.preferred_clip_method = 'viewport'\n\n\n# Add one line to all three boxes\nN = 10000\npos = np.empty((N, 2), dtype=np.float32)\npos[:, 0] = np.linspace(0, 10, N)\npos[:, 1] = np.random.normal(size=N)\n\ncolor = np.ones((N, 4), dtype=np.float32)\ncolor[:, 0] = np.linspace(0, 1, N)\ncolor[:, 1] = color[::-1, 0]\n\nl1 = scene.visuals.Line(pos=pos, color=color)\n#l1.transform = scene.transforms.AffineTransform()\n#l1.transform.scale((10, 1))\n#l1.transform.translate((20, 100))\n\nb1.add(l1)\n\ntr1 = scene.visuals.Visual()\ntr1.transform = scene.transforms.LogTransform(base=(2, 0, 0))\nl1.add_parent(tr1)\nb2.add(tr1)\n\ntr2 = scene.visuals.Visual()\ntr2.transform = 
scene.transforms.PolarTransform()\nl1.add_parent(tr2)\nb3.add(tr2)\n\n\n# add image to b1\n#img_data = np.random.normal(size=(100, 100, 3), loc=128,\n# scale=50).astype(np.ubyte)\n\n#image = scene.visuals.Image(img_data)\n#image.transform = scene.transforms.AffineTransform()\n#image.transform.scale((1, 1))\n#b1.add(image)\n\nimport sys\nif sys.flags.interactive == 0:\n app.run()\n","sub_path":"examples/scene/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"45430152","text":"from models import Heading\nfrom stib.stib import Traject\n\nfrom datetime import datetime, timedelta\nimport time\nimport sys\n\n\ndef every(interval):\n next_ = datetime.now()\n while True:\n next_ += interval\n yield next_\n\n\ndef sleep_until(target):\n now = datetime.now()\n delta = target - now\n if delta.total_seconds() > 0:\n time.sleep(delta.total_seconds())\n\n\ntraject = Traject(94, 1)\nfor next_loop in every(timedelta(seconds=20)):\n try:\n traject.update()\n stops = [stop.present for stop in traject.stops]\n h = Heading(line=str(traject.id), way=str(traject.way), stops=stops, timestamp=traject.last_update)\n h.save()\n print(\".\", end=\"\")\n sys.stdout.flush()\n except Exception as e:\n print(datetime.now(), e)\n\n sleep_until(next_loop)\n","sub_path":"scraper/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"50323002","text":"from app.models.MongodbConn import MongoPipeline\n\nclass BaseStudent():\n\n def __init__(self,name):\n self.student_info = self.query_basic_info(name=name)\n self.mac = self.student_info['mac']\n self.name = self.student_info['name']\n self.student_id = self.student_info['studentid']\n self.class_id = self.student_info['class_num']\n self.nickname = None\n\n\n def query_basic_info(self,name):\n conn = MongoPipeline()\n conn.open_connection('qiandao_mac_name')\n student_info = conn.getIds_one('info',{'name':name})\n return student_info\n\n\nif __name__ == '__main__':\n student = BaseStudent('万仕贤')\n print(student.name,student.mac,student.class_id,student.student_id)\n\n\n","sub_path":"app/models/BaseStudent.py","file_name":"BaseStudent.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"512356313","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 12 19 -2018\n@author: lbrein\n\n zhao fund long-horizon strategy: bias > 1 or bias < -1\n daily bars, tracked continuously every minute\n\n\"\"\"\n\nfrom com.base.public import public, logger\nimport numpy as np\nimport pandas as pd\nimport talib as ta\nimport uuid\nfrom com.data.interface_Rice import interface_Rice\nfrom com.object.obj_entity import stock_orderForm, stock_baseInfo\nfrom com.object.mon_entity import mon_trainOrder\nfrom multiprocessing import Pool, Manager\nimport time\nimport copy\n\n# stock screening\nclass train_etf50_kline(object):\n\n    def __init__(self):\n self.period = '1d'\n self.pageCount = 50\n\n self.timePeriodsList = [150]\n self.timePeriods = 150\n\n self.dropLineList = [0.6]\n self.dropLine = 0.20\n\n self.sarStartList = [0.02, 0.35, 0.05]\n self.sarStart = 0.02\n self.sarEndList = [0.02, 0.35, 0.05]\n self.sarEnd = 0.05\n\n self.iniAmount = 250000\n self.shift = 0.002\n self.ratio = 0.0006\n\n self.saveMongo = False\n self.methodName = 'filter_5'\n\n self.startDate = public.getDate(diff=-300) # 60-day data backtest\n self.endDate 
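
every() and sleep_until() in the scraper above give drift-free scheduling: each tick is derived from the previous target time, not from when the loop body finished. A condensed sketch of the pattern (here an interval parameter replaces the original's shadowed timedelta name):

from datetime import datetime, timedelta
import time

def every(interval):
    next_ = datetime.now()
    while True:
        next_ += interval    # targets stay on a fixed grid even if work runs long
        yield next_

for i, tick in zip(range(3), every(timedelta(seconds=1))):
    remaining = (tick - datetime.now()).total_seconds()
    if remaining > 0:
        time.sleep(remaining)
    print(i, datetime.now())
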
= public.getDate(diff=0)\n self.iterCondList = ['timePeriods', 'dropLine', 'sarStart', 'sarEnd']\n\n def empty(self):\n Record = stock_orderForm()\n Record.tablename = 'stock_orderForm_train'\n Record.empty(filter=\" method='%s'\" % self.methodName)\n if self.saveMongo:\n TrainOrder = mon_trainOrder()\n TrainOrder.empty({'method': '%s' % self.methodName})\n\n def pool_filter(self):\n #Rice = interface_Rice()\n #Rice.int_stockBase()\n Base = stock_baseInfo()\n Base.iniBound()\n\n lists = Base.getCodes(isBound=0)\n pool = Pool(processes=6)\n\n for k in range(0, len(lists), self.pageCount):\n codes = lists[k: k + self.pageCount]\n pool.apply_async(self.subFilter, (codes, k))\n\n pool.close()\n pool.join()\n\n def subFilter(self, codes, k):\n Rice = interface_Rice()\n Base = stock_baseInfo()\n period = 180\n line = 0.35\n res = Rice.kline(codes, period=self.period, start=self.startDate, end=self.endDate, pre=90)\n\n codeList = []\n for code in codes:\n df = res[code]\n # 计算跌幅和回跌幅度\n close = df['close']\n mx = close[-period:].max()\n mi = close[-period:].min()\n miw = ta.MININDEX(close, timeperiod=period).values[-1]\n mid = close[miw:].max()\n\n # 超过M5\n ma5 = ta.MA(close, timeperiod=5)\n last = close.values[-1]\n\n opt1 = (mx-mi)/mx > line and (mid-mi)/(mx-mi) < 0.372\n opt2 = (last > ma5.values[-1] or last > ma5.values[-2])\n if opt1 and opt2:\n codeList.append(code)\n #print(code, (mx-mi)/mx, (mid-mi)/(mx-mi))\n\n print(k, len(codeList))\n Base.updateBound(codeList)\n\n\n def pool(self):\n pool = Pool(processes=6)\n self.empty()\n\n Base = stock_baseInfo()\n lists = Base.getCodes(isBound=0)\n\n for k in range(0, len(lists), self.pageCount):\n codes = lists[k:k+self.pageCount]\n #self.start(codes, int(k/self.pageCount+1))\n try:\n print(k)\n pool.apply_async(self.start, (codes, int(k/self.pageCount+1)))\n pass\n except Exception as e:\n print(e)\n continue\n\n pool.close()\n pool.join()\n\n def iterCond(self):\n # 多重组合参数输出\n keys = self.iterCondList\n for s0 in self.__getattribute__(keys[0] + 'List'):\n self.__setattr__(keys[0], s0)\n\n for s1 in self.__getattribute__(keys[1] + 'List'):\n self.__setattr__(keys[1], s1)\n\n for s2 in self.__getattribute__(keys[2] + 'List'):\n self.__setattr__(keys[2], s2)\n\n for s3 in self.__getattribute__(keys[3] + 'List'):\n self.__setattr__(keys[3], s3)\n\n yield '%s_%s_%s_%s' % (str(s0), str(s1), str(s2), str(s3))\n\n\n # 分段布林策略\n def start(self, codes, n):\n time0 = time.time()\n print('process %s start:' % str(n))\n self.Rice = interface_Rice()\n self.Record = stock_orderForm()\n self.Record.tablename = 'stock_orderForm_train'\n self.TrainOrder = mon_trainOrder()\n\n self.codes = codes\n res = self.Rice.kline(codes, period=self.period, start=self.startDate, end=self.endDate,pre=90)\n\n for code in codes:\n for conds in self.iterCond():\n\n self.uid = '%s_%s_pop' % (code.replace('.', '_'), conds)\n self.batchid = uuid.uuid1()\n\n df = res[code]\n # 计算统一特征\n df['createTime'] = df.index\n df = self.add_stock_index(df)\n\n df['code'] = code\n df['mode'] = df.apply(lambda row: self.point(row), axis=1)\n\n if code.find('603383') > -1 or code.find('601139') > -1:\n file = self.Rice.basePath + '%s.csv' % self.uid\n print(file)\n df.to_csv(file, index=1)\n\n self.saveStage(df)\n\n print('process %s end: %s ' % (str(n),str(time.time()-time0)))\n\n def point(self, row):\n mx, mi, mid, sm, sm5 = (row[key] for key in 'max90,min90,mid,sarm,sarm5'.split(','))\n line = self.dropLine\n\n if not ((mx-mi)/mx > line and (mid-mi)/(mx-mi) < 0.372):\n return 0\n\n return 1 if 
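
subFilter above screens for names whose peak-to-trough drop exceeds a threshold while the bounce off the low stays under 37.2% of that drop. The same test in isolation with pandas/numpy, assuming a synthetic close-price series:

import numpy as np
import pandas as pd

close = pd.Series(np.r_[np.linspace(100, 40, 120), np.linspace(40, 55, 60)])

window = close[-180:]
mx, mi = window.max(), window.min()
low_pos = window.values.argmin()
mid = window.iloc[low_pos:].max()        # highest close since the low

drop = (mx - mi) / mx                    # peak-to-trough drawdown
retrace = (mid - mi) / (mx - mi)         # bounce as a share of the drop
print(drop > 0.35 and retrace < 0.372)   # True for this synthetic series
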
sm == 1 else -1 if sm5==-1 else 0\n\n def turn(self, mm, md, mode):\n return 0 if mm > 0 else 1 if mode * md > 0 else -1\n\n def mid(self, row, close):\n c1 = close[close.index <= row['createTime']][row['miw']:]\n return c1.max()\n\n def add_stock_index(self, df0, index_list=None):\n\n close = df0[\"close\"]\n\n df0['max90'] = mx = ta.MAX(close, timeperiod = self.timePeriods)\n df0['min90'] = min = ta.MIN(close, timeperiod= self.timePeriods)\n df0['miw'] = ta.MININDEX(close, timeperiod=self.timePeriods)\n df0['mid'] = df0.apply(lambda row: self.mid(row, close), axis=1)\n\n # 穿越\n sar = ta.SAR(df0['high'], df0['low'], acceleration=self.sarStart, maximum=0.2)\n df0['sard'] = sard = close - sar\n df0['sarm'] = sard * sard.shift(1)\n df0['sarm'] = df0.apply(lambda row: self.turn(row['sarm'], row['sard'], 1), axis=1)\n\n sar5 = ta.SAR(df0['high'], df0['low'], acceleration=self.sarEnd, maximum=0.2)\n df0['sard5'] = sard5 = close - sar5\n df0['sarm5'] = sard5 * sard5.shift(1)\n df0['sarm5'] = df0.apply(lambda row: self.turn(row['sarm5'], row['sard5'], 1), axis=1)\n\n return df0\n\n #\n def saveStage(self, df2):\n self.preNode = None\n period, ini = 60, self.iniAmount\n self.mon_records, self.records = [], []\n\n for i in range(period, len(df2)):\n mode, close = (df2.ix[i, key] for key in \"mode,close\".split(\",\"))\n\n isBuy, isRun = -1, False\n pN = self.preNode\n # 部分加仓\n if pN is None and mode ==1 :\n isBuy, isRun, mode = 1, True, mode\n\n elif pN is not None and mode==-1:\n isBuy, isRun, mode = -1, True, mode\n\n if isRun:\n # print(i, isBuy, pos, vol)\n self.order(df2.iloc[i], isBuy, mode)\n\n #print(self.uid, len(self.records))\n # 保存明细\n if len(self.records) > 0:\n self.Record.insertAll(self.records)\n\n if self.saveMongo and len(self.mon_records) > 0:\n #print(\"monsave\", self.uid, self.mon_records)\n self.TrainOrder.col.insert_many(self.mon_records)\n\n def mon_saveTick(self, n0, doc):\n tick = copy.deepcopy(n0.to_dict())\n tick.update(doc)\n for key in ['sarm', 'sarm5', 'miw', 'mode']:\n if key in tick: tick[key] = int(tick[key])\n\n if doc['isBuy'] == -1:\n self.mon_records.append(tick)\n if self.preTick is not None:\n self.preTick['income'] = doc['income']\n self.preTick['enddate'] = doc['createdate']\n #self.preTick['isBuy'] = doc['isstop']\n self.mon_records.append(copy.deepcopy(self.preTick))\n self.preTick = None\n else:\n self.preTick = tick\n\n def order(self, n0, isBuy, mode):\n pN = self.preNode\n now = public.getDatetime()\n vol, fee, amount,income, p0 = 0,0,0,0, 0\n price = n0[\"close\"]\n if isBuy > 0 :\n self.batchid = uuid.uuid1()\n p0 = price * (1+ self.shift)\n vol = int(self.iniAmount/p0)\n amount = vol * p0\n fee = vol * p0 * self.ratio\n income = -fee\n\n elif isBuy < 0:\n p0 = price * (1 - self.shift)\n vol = pN['vol']\n amount = vol * p0\n fee = vol * p0 * self.ratio\n income = amount - pN['amount']-fee\n\n doc = {\n \"code\": n0['code'],\n \"name\": n0['code'],\n \"createdate\": n0['createTime'],\n \"price\": p0,\n \"vol\": vol,\n \"mode\": int(mode),\n \"isBuy\": int(isBuy),\n \"fee\": fee,\n \"amount\": amount,\n \"income\": income,\n \"method\": self.methodName,\n \"batchid\": self.batchid,\n \"uid\": self.uid\n }\n\n self.records.append(doc)\n self.mon_saveTick(n0, doc)\n\n # 设置上一个记录\n if isBuy > 0:\n self.preNode = doc\n else:\n self.preNode = None\n\n return True\n\n\ndef main():\n actionMap = {\n \"start\": 1, #\n \"filter\": 0,\n \"stat\": 0,\n }\n obj = train_etf50_kline()\n\n if actionMap[\"start\"] == 1:\n obj.pool()\n\n\n if actionMap[\"filter\"] == 
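
add_stock_index above flags SAR crossovers by multiplying (close - sar) with its one-bar lag: the product is negative exactly when the sign flipped between bars. The test in isolation with pandas, using a hypothetical SAR track so TA-Lib is not required:

import pandas as pd

close = pd.Series([10.0, 10.5, 11.0, 10.2, 9.8])
sar = pd.Series([10.4, 10.4, 10.5, 10.6, 10.1])   # hypothetical SAR values

sard = close - sar                 # distance from the stop-and-reverse level
cross = sard * sard.shift(1)       # negative product == sign change
print((cross < 0).tolist())        # [False, True, False, True, False]
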
1:\n obj.pool_filter()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"com/train/train_stock_pop1.py","file_name":"train_stock_pop1.py","file_ext":"py","file_size_in_byte":10188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"170338721","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 26 19:11:59 2015\n\n@author: Ben\n\"\"\"\n\nimport pandas as pd\nimport util\nfrom config import cfg\n\nclass DemandSideOutput(object):\n def __init__(self):\n self.output_types = ['energy_outputs', 'cost_outputs', 'stock_outputs', 'emissions_outputs']\n \n def return_cleaned_output(self, output_type):\n if not hasattr(self, output_type):\n return None\n if type(getattr(self, output_type)) is not pd.core.frame.DataFrame:\n raise ValueError('output_type must be a pandas dataframe')\n cleaned_output = getattr(self, output_type).copy()\n \n dct = cfg.outputs_id_map\n index = cleaned_output.index\n index.set_levels([[dct[name].get(item, item) for item in level] for name, level in zip(index.names, index.levels)], inplace=True)\n index.names = [x.upper() if isinstance(x, basestring) else x for x in index.names]\n cleaned_output.columns = [x.upper() if isinstance(x, basestring) else x for x in cleaned_output.columns]\n \n return cleaned_output\n\n#class Output(object):\n# \"\"\"creates an empty container\"\"\"\n# def __init__(self, output_name, name):\n# self.output_name = output_name.upper()\n# self.name = name.upper()\n#\n#\n#class DemandSectorOutput(Output):\n# def __init__(self, output_name, name, sector_id):\n# self.sector_id = sector_id\n# Output.__init__(self, output_name=output_name, name=name)\n#\n#\n#class DemandSubsectorOutput(Output):\n# def __init__(self, output_name, name, subsector_id):\n# self.subsector_id = subsector_id\n# Output.__init__(self, output_name=output_name, name=name)\n#\n# def clean_subsector_outputs(self):\n# \"\"\"\n# cleans subsector outputs for viewing outside the model\n# \"\"\"\n# geography_lookup_dict = None\n# technology_lookup_dict = None\n# final_energy_lookup_dict = None\n# attributes = vars(self)\n# for att in attributes:\n# att = getattr(self, att)\n# if type(att) == pd.core.frame.DataFrame:\n# self.clean_subsector_geography(att, geography_lookup_dict)\n# self.clean_subsector_technology(att, self.subsector_id, technology_lookup_dict)\n# self.clean_subsector_energy(att, final_energy_lookup_dict)\n# self.clean_subsector_other_indexes(att, self.subsector_id)\n# self.append_subsector(att)\n# self.uppercase_levels_and_names(att)\n#\n# def uppercase_levels_and_names(self, attribute):\n# \"\"\"changes the level names and labels to uppercase\"\"\"\n# for name in attribute.index.names:\n# level_loc = util.position_in_index(attribute, name)\n# level = attribute.index.levels[level_loc]\n# level_upper = [x.upper() for x in level if isinstance(x, unicode)]\n# level_dict = dict(zip(level, level_upper))\n# util.replace_index_label(attribute, level_dict, name)\n# attribute.index.names = [x.upper() for x in attribute.index.names]\n#\n# def append_subsector(self, attribute):\n# \"\"\"append subsector id to dataframe levels\"\"\"\n# attribute['subsector'] = util.sql_read_table('DemandSubsectors', 'name', id=self.subsector_id)\n# attribute.set_index('subsector', append=True, inplace=True)\n#\n#\n# def clean_subsector_geography(self, attribute, geography_lookup_dict):\n# \"\"\"replace geography ids with names\"\"\"\n# if geography_lookup_dict is None:\n# primary_geography = cfg.cfgfile.get('case', 
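
return_cleaned_output above walks cfg.outputs_id_map to swap index-level ids for names and uppercases the level names. A sketch of the same cleanup on a toy frame, using DataFrame.rename with level= instead of mutating the levels in place; id_map is a hypothetical stand-in for cfg.outputs_id_map:

import pandas as pd

idx = pd.MultiIndex.from_tuples([(1, "a"), (2, "b")], names=["sector", "fuel"])
df = pd.DataFrame({"value": [10, 20]}, index=idx)

id_map = {"sector": {1: "residential", 2: "commercial"}}
for level_name, mapping in id_map.items():
    df = df.rename(index=mapping, level=level_name)   # ids -> names on one level
df.index.names = [n.upper() for n in df.index.names]

print(df.index.tolist())   # [('residential', 'a'), ('commercial', 'b')]
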
'primary_geography')\n# primary_geography_id = util.sql_read_table('Geographies', 'id', name=primary_geography)\n# geography_lookup_dict = dict(util.sql_read_table('GeographiesData', ['id', 'name'],\n# geography_id=primary_geography_id, return_unique=True,\n# return_iterable=True))\n#\n# util.replace_index_label(attribute, geography_lookup_dict, primary_geography)\n# util.replace_index_name(attribute, 'geography', primary_geography)\n#\n# def clean_subsector_technology(self, attribute, subsector_id, technology_lookup_dict):\n# \"\"\"replace technology ids with technology names\"\"\"\n# if 'technology' in attribute.index.names:\n# if technology_lookup_dict is None:\n# technology_lookup_dict = dict(\n# util.sql_read_table('DemandTechs', ['id', 'name'], subsector_id=subsector_id))\n# util.replace_index_label(attribute, technology_lookup_dict, 'technology')\n# if 'technology_temp' in attribute.index.names:\n# util.replace_index_name(attribute, 'technology', 'technology_temp')\n#\n# def clean_subsector_energy(self, attribute, final_energy_lookup_dict):\n# \"\"\"replace final energy ids with names\"\"\"\n# if 'final_energy' in attribute.index.names:\n# if final_energy_lookup_dict is None:\n# final_energy_lookup_dict = dict(util.sql_read_table('FinalEnergy', ['id', 'name']))\n# util.replace_index_label(attribute, final_energy_lookup_dict, 'final_energy')\n#\n# def clean_subsector_other_indexes(self, attribute, subsector_id):\n# \"\"\"replace other index ids with names\"\"\"\n# other_indexes = [x for x in util.sql_read_table('OtherIndexes', 'name', return_iterable=True) if\n# x not in ['technology', 'final_energy']]\n# other_indexes = [x for x in attribute.index.names if x in other_indexes]\n# for other_index in other_indexes:\n# index_id = util.sql_read_table('OtherIndexes', 'id', name=other_index)\n# lookup_dict = dict(util.sql_read_table('OtherIndexesData', ['id', 'name'], other_index_id=index_id))\n# util.replace_index_label(attribute, lookup_dict, other_index)","sub_path":"energyPATHWAYS/outputs.py","file_name":"outputs.py","file_ext":"py","file_size_in_byte":5900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"62912514","text":"ip_protocol = [\"3pc\", \"crdup\", \"ggp\", \"ip\", \"irtp\", \"mux\", \"rsvp\", \"sps\", \"uti\", \"a/n\", \"crtp\", \"gmtp\", \"ipcomp\",\n \"isis\", \"narp\", \"rsvp-e2e-ignore\", \"srp\", \"vines\", \"ah\", \"dccp\", \"\", \"gre\", \"ipcv\", \"iso-ip\", \"netblt\",\n \"rvd\", \"sscopmce\", \"visa\", \"any\", \"dcn\", \"hip\", \"ipencap\", \"iso-tp4\", \"nsfnet-igp\", \"sat-expak\", \"st\",\n \"vmtp\", \"argus\", \"ddp\", \"hmp\", \"ipip\", \"kryptolan\", \"nvp\", \"sat-mon\", \"stp\", \"vrrp\", \"aris\", \"ddx\",\n \"hopopt\", \"iplt\", \"l2tp\", \"ospf\", \"scc-sp\", \"sun-nd\", \"wb-expak\", \"ax.25\", \"dgp\", \"i-nlsp\", \"ippc\",\n \"larp\", \"pgm\", \"scps\", \"swipe\", \"wb-mon\", \"bbn-rcc\", \"dsr\", \"iatp\", \"ipv6\", \"leaf-1\", \"pim\", \"sctp\",\n \"tcf\", \"wesp\", \"bna\", \"egp\", \"icmp\", \"ipv6-auth\", \"leaf-2\", \"pipe\", \"sdrp\", \"tcp\", \"wsn\", \"br-sat-mon\",\n \"eigrp\", \"idpr\", \"ipv6-crypt\", \"manet\", \"pnni\", \"secure-vmtp\", \"tlsp\", \"xnet\", \"cbt\", \"emcon\",\n \"idpr-cmtp\", \"ipv6-frag\", \"merit-inp\", \"prm\", \"shim6\", \"tp++\", \"xns-idp\", \"cftp\", \"encap\", \"idrp\",\n \"ipv6-icmp\", \"mfe-nsp\", \"ptp\", \"skip\", \"trunk-1\", \"xtp\", \"chaos\", \"esp\", \"ifmp\", \"ipv6-nonxt\", \"micp\",\n \"pup\", \"sm\", \"trunk-2\", \"compaq-peer\", \"etherip\", 
\"igmp\", \"ipv6-opts\", \"mobile\", \"pvp\", \"smp\", \"ttp\",\n \"cphb\", \"fc\", \"igp\", \"ipv6-route\", \"mpls-in-ip\", \"qnx\", \"snp\", \"udp\", \"cpnx\", \"fire\", \"il\", \"ipx-in-ip\",\n \"mtp\", \"rdp\", \"sprite-rpc\", \"udplite\"]\n\nload_balance_list = [\"dynamic-ratio-member\", \"least-connections-node\", \"predictive-node\", \"ratio-session\",\n \"dynamic-ratio-node\", \"least-sessions\", \"ratio-least-connections-member\", \"round-robin\",\n \"fastest-app-response\", \"observed-member\", \"ratio-least-connections-node\",\n \"weighted-least-connections-member\", \"fastest-node\", \"observed-node\", \"ratio-member\",\n \"weighted-least-connections-node\", \"least-connections-member\", \"predictive-member\", \"ratio-node\"]\n\nother_options_list = [\"min-up-members\", \"min-active-members\", \"min-up-members-action\", \"min-up-members-checking\"]\n\nmember_option_specific_list = [\"state\", \"dynamic-ratio\", \"priority-group\", \"ratio\"]\n\n\nclass F5_virtual_server:\n\n def __init__(self, URL_name, VIP, port, pool_name, virtual_server_name, destination, description, connection_limit):\n\n self.URL_name = input(\"what is the name of the URL? \\n\")\n self.VIP = input(\"What is the VIP? \\n\")\n self.port = input(\"what is the service port? \\n\")\n self.pool_name = f\"/Common/pl_{self.URL_name}_{self.port}\"\n self.virtual_server_name = f\"vs_{self.URL_name}_{self.port}\"\n self.destination = f'{self.VIP}:{self.port}'\n self.description = input(\"Enter the description \\n\")\n self.connection_limit = input(\"What is the connection limit? \\n\")\n\n def partition_func(self):\n\n partition_while = True\n while partition_while:\n print(\"is the partition Common(Y/N)\")\n partition_question = input()\n\n if partition_question == \"Y\":\n self.partition = \"Common\"\n break\n partition_while = False\n\n if partition_question == \"N\":\n print(\"What is the name of the partition?\")\n self.partition = input()\n break\n partition_while = False\n\n if partition_question != \"Y\" or partition_question != \"N\":\n print(\"Available options are Y/N\")\n\n def encryption_func(self):\n\n reencryption = True\n while reencryption:\n\n print(\"Do you need SSL offloading or re-encryption?(Y/N)\")\n encryption_need = input()\n\n if encryption_need == \"Y\":\n clientssl = f\" /Common/pr-sscli_{self.URL_name} {{ context clientside }}\"\n break\n reencryption = False\n\n if encryption_need == \"N\":\n serverside = \"N\"\n print(\"No SSL need\")\n break\n reencryption = False\n\n if encryption_need != \"Y\" or encryption_need != \"N\":\n print(\"Available options are Y/N\")\n\n SSL = True\n while SSL:\n\n if encryption_need == \"N\":\n serverssl_ssl = \"\"\n break\n SSL = False\n\n print(\"Do you need serverside SSL (Y/N)\")\n serverside = input()\n if serverside == \"Y\":\n print(\"is the profile name serverssl_default? 
(Y/N)\")\n serverssl = input()\n\n if serverssl == \"Y\":\n print(\"Let's use that one then\")\n serverssl_ssl = \"/Common/serverssl_default\"\n break\n SSL = False\n\n if serverssl == \"N\":\n print(\"What is the name of the serverside profile(Include partition, example: /Common/)\")\n serverssl_ssl = input()\n break\n SSL = False\n\n if serverssl != \"Y\" or serverssl != \"N\":\n print(\"Available options are Y/N\")\n\n if serverside == \"N\":\n print(\"No serverside is need\")\n break\n SSL = False\n\n if serverside != \"Y\" or serverside != \"N\":\n print(\"Available options are Y/N\")\n\n if serverside == \"Y\":\n serverSSL = f\"{serverssl_ssl} {{ context serverside }}\"\n\n if serverside == \"N\":\n serverSSL = \"\"\n\n if encryption_need == \"Y\":\n self.SSL_PROFILE = f\"profiles add {{ {clientssl} {serverSSL} }}\"\n if encryption_need == \"N\":\n serverSSL = None\n self.SSL_PROFILE = \"\"\n\n def profile_func(self):\n profile = True\n profile_list = []\n while profile:\n print(\"Do you need any other profile(Y/N)?(example http)\")\n profile_q = input()\n\n if profile_q == \"Y\":\n print(\"what is the name of the profile\")\n profile_need = input()\n profile_list.append(f'profiles add {{ {profile_need} }}')\n\n if profile_q == \"N\":\n print(\"No more profiles neeed\")\n break\n profile = False\n\n if profile_q != \"Y\" or profile_q != \"N\":\n print(\"Available options are Y/N\")\n\n if profile_list == []:\n self.join_list_profile = \"\"\n\n if profile_list != []:\n self.join_list_profile = \" \".join(profile_list)\n\n def persistance_func(self):\n persistance = True\n while persistance:\n print(\"Do you need persistance profile(Y/N)\")\n persistance_option = input()\n\n if persistance_option == \"Y\":\n print(\"what is the name of the profile\")\n persistance_config = input()\n break\n persistance = False\n\n if persistance_option == \"N\":\n print(\"No persistance profiles neeed\")\n break\n persistance = False\n\n if persistance_option != \"Y\" or persistance_option != \"N\":\n print(\"Available options are Y/N\")\n\n if persistance_option == \"Y\":\n self.Apply_persistance = f\"persist replace-all-with {{ {persistance_config} }}\"\n\n if persistance_option == \"N\":\n self.Apply_persistance = \"\"\n\n def protocol_func(self):\n protocol = True\n while protocol:\n print(\"what is the ip-protocol\")\n self.ipprotocol = input()\n\n if self.ipprotocol in ip_protocol:\n self.ipprotocol\n break\n\n else:\n print(\"Here are the available options\")\n for n in ip_protocol:\n print(n)\n\n def vs_config(self):\n print(\"\\n\")\n print(\"Here is the virtual server configuration command \")\n print(\"\\n\")\n return f\"cd /{self.partition} \\ncreate ltm virtual {self.virtual_server_name} description {self.description} destination {self.destination} ip-protocol {self.ipprotocol} {self.join_list_profile} pool {self.pool_name} connection-limit {self.connection_limit} {self.SSL_PROFILE} {self.Apply_persistance} source-address-translation {{ type automap }} translate-address enabled translate-port enabled\"\n\n def first_members_func(self):\n print(\"\\n\")\n print(\"Pool configuration\")\n print(\"\\n\")\n\n first_member_list = []\n first_member_dict = {}\n\n print(\"What is the member IP address? 
\")\n self.key_address = input()\n\n print(\"what is the member port\")\n self.value_port = input()\n\n first_member_dict[self.key_address] = self.value_port\n\n first_member_option_list = []\n first_member_options = True\n while first_member_options:\n\n member_other_options = input(\"Do you need additional member options for the pool member?(Y/N) \")\n\n if member_other_options == \"Y\":\n print(\"Available options are\")\n for item in member_option_specific_list:\n print(item)\n option = input(\"Enter the option \")\n\n if option == \"state\":\n state_options = input(\"Available options user-down/user-up\")\n self.state_option = f\"state {state_options}\"\n first_member_option_list.append(self.state_option)\n\n if option == \"dynamic-ratio\":\n dynamic_ratio_option = int(input(\"Enter the value \"))\n self.dynamic_ratio = f\"dynamic-ratio {dynamic_ratio_option}\"\n first_member_option_list.append(self.dynamic_ratio)\n\n if option == \"priority-group\":\n priority_group_option = int(input(\"Enter the value \"))\n self.priority_group = f\"priority-group {priority_group_option}\"\n first_member_option_list.append(self.priority_group)\n\n if option == \"ratio\":\n ratio_option = int(input(\"Enter the value \"))\n self.ratio = f\"ratio {ratio_option}\"\n first_member_option_list.append(self.ratio)\n\n if option not in member_option_specific_list:\n print(\"Please try again\")\n\n self.join_member_option_list = \" \".join(first_member_option_list)\n\n if member_other_options == \"N\":\n print(\"No more member options need\")\n break\n first_member_options = False\n\n first_member_list.append(\n f\"members add {{ {self.key_address}:{self.value_port} {{ address {self.key_address} {self.join_member_option_list} session user-enabled }} }}\")\n\n self.join_first_member_list = \" \".join(first_member_list)\n\n def members_func(self):\n try:\n print(\"is the port the same for all the members? (Y/N/single)\")\n same_port = input()\n\n if same_port == \"Y\":\n members = False\n same_port_path = True\n\n if same_port == \"N\":\n members = True\n same_port_path = False\n\n if same_port == \"single\":\n members = False\n same_port_path = False\n print(\"No more members need\")\n\n else:\n print(\"Available options are Y/N/single\")\n\n members_list = []\n members_dict = {}\n count_1 = 0\n while members:\n\n if count_1 == 0:\n new_member = \"Y\"\n\n if count_1 > 0:\n print(\"Do you need more members ? Y/N\")\n new_member = input()\n\n count_1 += 1\n if new_member == \"Y\":\n print(\"What is the member IP address? 
\")\n key_address = input()\n\n print(\"what is the member port\")\n value_port = input()\n members_dict[key_address] = value_port\n\n pl_member_option_list = []\n options = True\n while options:\n\n member_other_options = input(\"Do you need additional member options for the pool member?(Y/N) \")\n\n if member_other_options == \"Y\":\n print(\"Available options are\")\n for item in member_option_specific_list:\n print(item)\n\n option = input(\"Enter the option \")\n\n if option == \"state\":\n state_options = input(\"Available options user-down/user-up\")\n self.state_option = f\"state {state_options}\"\n pl_member_option_list.append(self.state_option)\n\n if option == \"dynamic-ratio\":\n dynamic_ratio_option = int(input(\"Enter the value \"))\n self.dynamic_ratio = f\"dynamic-ratio {dynamic_ratio_option}\"\n pl_member_option_list.append(self.dynamic_ratio)\n\n if option == \"priority-group\":\n priority_group_option = int(input(\"Enter the value \"))\n self.priority_group = f\"priority-group {priority_group_option}\"\n pl_member_option_list.append(self.priority_group)\n\n if option == \"ratio\":\n ratio_option = int(input(\"Enter the value \"))\n self.ratio = f\"ratio {ratio_option}\"\n pl_member_option_list.append(self.ratio)\n\n if option not in member_option_specific_list:\n print(\"Please try again\")\n\n self.join_member_option_list = \" \".join(pl_member_option_list)\n\n if member_other_options == \"N\":\n print(\"No more member options need\")\n break\n options = False\n\n members_list.append(\n f\"members add {{ {key_address}:{value_port} {{ address {key_address} {self.join_member_option_list} session user-enabled }} }}\")\n\n if new_member == \"N\":\n print(\"No more members need\")\n break\n members = False\n\n else:\n print(\"Available options are Y/N\")\n\n count = 0\n while same_port_path:\n\n if count == 0:\n new_member = \"Y\"\n\n if count > 0:\n print(\"Do you need more members ? Y/N\")\n new_member = input()\n\n count += 1\n if new_member == \"Y\":\n print(\"What is the member IP address? 
\")\n key_address = input()\n members_dict[key_address] = self.value_port\n pl_member_option_list = []\n options = True\n while options:\n\n member_other_options = input(\"Do you need additional member options for the pool member?(Y/N) \")\n\n if member_other_options == \"Y\":\n print(\"Available options are\")\n for item in member_option_specific_list:\n print(item)\n\n option = input(\"Enter the option \")\n\n if option == \"state\":\n state_options = input(\"Available options user-down/user-up\")\n self.state_option = f\"state {state_options}\"\n pl_member_option_list.append(self.state_option)\n\n if option == \"dynamic-ratio\":\n dynamic_ratio_option = int(input(\"Enter the value \"))\n self.dynamic_ratio = f\"dynamic-ratio {dynamic_ratio_option}\"\n pl_member_option_list.append(self.dynamic_ratio)\n\n if option == \"priority-group\":\n priority_group_option = int(input(\"Enter the value \"))\n self.priority_group = f\"priority-group {priority_group_option}\"\n pl_member_option_list.append(self.priority_group)\n\n if option == \"ratio\":\n ratio_option = int(input(\"Enter the value \"))\n self.ratio = f\"ratio {ratio_option}\"\n pl_member_option_list.append(self.ratio)\n\n if option not in member_option_specific_list:\n print(\"Please try again\")\n\n self.join_member_option_list = \" \".join(pl_member_option_list)\n\n if member_other_options == \"N\":\n print(\"No more member options need\")\n break\n options = False\n\n members_list.append(\n f\"members add {{ {key_address}:{self.value_port} {{ address {key_address} {self.join_member_option_list} session user-enabled }} }}\")\n\n if new_member == \"N\":\n print(\"No more members need\")\n break\n members = False\n\n else:\n print(\"Available options are Y/N\")\n\n if members_list == []:\n self.join_list_members = \"\"\n\n members_list.pop()\n self.join_list_members = \" \".join(members_list)\n\n\n except IndexError:\n pass\n\n def load_balancing_mode_func(self):\n load = True\n while load:\n print(\"What is the load balance method? 
\")\n self.load_balance_method = input()\n\n if self.load_balance_method in load_balance_list:\n self.load_balance_method\n break\n\n else:\n print(\"Here are the available options\")\n for n in load_balance_list:\n print(n)\n\n def pool_other_options_func(self):\n pl_globlal_option_list = []\n optional = True\n\n while optional:\n self.additional_option_list = []\n\n other_options = input(\"Do you need additional options for the pool?(Y/N) \")\n\n if other_options == \"Y\":\n print(\"Available options are\")\n for item in other_options_list:\n print(item)\n\n option = input(\"Enter the option \")\n\n if option == \"min-up-members\":\n min_up_members_option = int(input(\"Enter the value \"))\n self.min_up_members_option = f\"min-up-members {min_up_members_option}\"\n pl_globlal_option_list.append(self.min_up_members_option)\n\n if option == \"min-active-members\":\n min_active_members_option = int(input(\"Enter the value \"))\n self.min_active_members = f\"min-active-members {min_active_members_option}\"\n pl_globlal_option_list.append(self.min_active_members)\n\n if option == \"min-up-members-checking\":\n min_up_members_checking_option = input(\"Available options disabled/enabled \")\n self.min_up_members_checking_option = f\"min-up-members-checking {min_up_members_checking_option}\"\n pl_globlal_option_list.append(self.min_up_members_checking_option)\n\n if option == \"min-up-members-action\":\n min_up_members_action_options = input(\"Available options failover/reboot/restart-all \")\n self.min_up_members_action = f\"min-up-members-action {min_up_members_action_options}\"\n pl_globlal_option_list.append(self.min_up_members_action)\n\n if option not in other_options_list:\n print(\"please try again\")\n\n if other_options == \"N\":\n print(\"No more options need\")\n break\n optional = False\n\n self.join_pl_globlal_option_list = \" \".join(pl_globlal_option_list)\n\n def monitor_func(self):\n self.monitor = input(\"Enter the monitor \")\n\n def pl_config(self):\n print(\"\\n\")\n print(\"Here is the pool configuration command \")\n print(\"\\n\")\n return f\"cd /{self.partition} \\n create ltm pool pl_{self.URL_name}_{self.port} description {self.description} load-balancing-mode {self.load_balance_method} {self.join_first_member_list} {self.join_list_members} {self.join_pl_globlal_option_list} monitor {self.monitor} \"\n\n def vs_ssl_profile(self):\n print(\"\\n\")\n print(\"Here is the SSL client profile configuration command \")\n print(\"\\n\")\n return f\"create ltm profile client-ssl pr-sscli_{self.URL_name} {{ app-service none cert {self.URL_name}_2021.crt cert-key-chain add {{ {self.URL_name}_2021_intermediate-ca {{ cert {self.URL_name}_2021.crt chain intermediate-ca.crt key {self.URL_name}_2021.key }} }} chain intermediate-ca.crt defaults-from clientssl key {self.URL_name}_2021.key passphrase none }} \"\n\n\nK = F5_virtual_server(\"URL_name\", \"VIP\", \"port\", \"pool_name\", \"virtual_server_name\", \"destination\", \"description\",\n \"connection_limit\")\n\nK.partition_func()\nK.encryption_func()\nK.profile_func()\nK.persistance_func()\nK.protocol_func()\n\nK.first_members_func()\nK.members_func()\nK.pool_other_options_func()\nK.load_balancing_mode_func()\nK.monitor_func()\n\nprint(K.vs_ssl_profile())\nprint(K.pl_config())\nprint(K.vs_config())\n","sub_path":"f5_configuration_virtual_server.py","file_name":"f5_configuration_virtual_server.py","file_ext":"py","file_size_in_byte":21993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"473865854","text":"from bs4 import BeautifulSoup\nimport csv\nimport requests\n\nr = requests.get('http://potsherd.net/atlas/Ware/')\nsoup = BeautifulSoup(r.content, \"html.parser\")\n\nf = csv.writer(open(\"output.csv\", \"w\"))\nf.writerow([\"Domain\", \"Path\"]) # Write column headers as the first line\n\ntrs = soup.find_all('tr')\n\nfor tr in trs:\n for link in tr.find_all('a'):\n fulllink = link.get ('href')\n print (fulllink) #print in terminal to verify results\n f.writerow([\"http://potsherd.net/atlas/\", fulllink]) # Write column headers as the first line\n","sub_path":"static/data/potbot.py","file_name":"potbot.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"261107250","text":"\"\"\" Example of grow-cut segmentation \"\"\"\n\nimport numpy as np\n\nfrom growcut import automata, growcut\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib import animation\n\n\n# Load an image of a particular type\nimage = plt.imread('./examples/flower.png')\nlum = np.average(image, 2)\n\n# Form a label grid (0: no label, 1: foreground, 2: background)\nlabel = np.zeros_like(lum, dtype=np.int)\nlabel[:] = -1\nlabel[75:90, 100:110] = 1\nlabel[110:120, 150:160] = 1\nlabel[50:55, 160:165] = 1\nlabel[50:55, 180:185] = 0\nlabel[0:10, 0:10] = 0\nlabel[75:90, 0:10] = 0\nlabel[0:10, 200:210] = 0\nlabel[75:90, 200:210] = 0\n\n# Form a strength grid.\nstrength = np.zeros_like(lum, dtype=np.float64)\nstrength[label != -1] = 1.0\n\n\ncoordinates = automata.formSamples(lum.shape, neighbours=automata.CONNECT_4)\n\n# Plot the image and the label map.\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))\nax1.imshow(lum, interpolation='nearest', cmap='gray')\nax1.contour(label, colors='r')\nax1.axis('off')\n\nimg = ax2.imshow(label, interpolation='nearest', cmap='gray', vmin=0, vmax=1)\nax2.axis('off')\n\n\ndef init():\n img.set_data(label)\n return img,\n\n\ndef animate(i):\n strength[:], label[:] = growcut.numpyAutomate(coordinates, lum, strength, label)\n\n img.set_data((label == 1) * lum)\n return img,\n\n# call the animator. 
blit=True means only re-draw the parts that have changed.\nanim = animation.FuncAnimation(\n fig,\n animate,\n init_func=init,\n frames=200,\n interval=1,\n #blit=True\n )\n\nplt.show()\n\n","sub_path":"examples/flower.py","file_name":"flower.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"218106102","text":"#chap2- problem 32\nimport string\ndef mutate(s):\n '''\n input : string\n output: words which are the mutations \n of the string \n\n '''\n d=[]\n for x in list(string.ascii_lowercase)+['']:\n for a in range(len(s)):\n c=''\n if a==0:\n c=x+s[1:]\n elif a \",\n completer=completer,\n )\n else:\n as_input = input(f\"{get_flair()} (opt)> \")\n\n # Parse fundamental analysis command of the list of possible commands\n try:\n (ns_known_args, l_args) = opt_parser.parse_known_args(as_input.split())\n\n except SystemExit:\n print(\"The command selected doesn't exist\\n\")\n continue\n\n if ns_known_args.cmd == \"help\":\n pass\n # print_options(s_ticker)\n\n elif ns_known_args.cmd == \"q\":\n # Just leave the options menu\n return False\n\n elif ns_known_args.cmd == \"quit\":\n # Abandon the program\n return True\n\n elif ns_known_args.cmd == \"volume\":\n # call the volume graph\n vol.volume_graph(l_args, s_ticker)\n\n elif ns_known_args.cmd == \"oi\":\n # call the volume graph\n vol.open_interest_graph(l_args, s_ticker)\n else:\n print(\"Command not recognized!\")\n","sub_path":"gamestonk_terminal/options/op_menu.py","file_name":"op_menu.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"53833709","text":"# API key and secret from the crypto.com exchange.\napi_key = \"xxx\"\napi_secret = \"xxx\"\n\n# The list of coin pairs you want to trade with.\npair_list = [\n (\"ADA\", \"ADA_USDT\"),\n (\"ALGO\", \"ALGO_USDT\"),\n (\"ATOM\", \"ATOM_USDT\"),\n (\"BTC\", \"BTC_USDT\"),\n (\"CRO\", \"CRO_USDT\"),\n (\"DOT\", \"DOT_USDT\"),\n (\"ETH\", \"ETH_USDT\"),\n (\"LTC\", \"LTC_USDT\"),\n (\"NANO\", \"NANO_USDT\"),\n (\"VET\", \"VET_USDT\"),\n (\"XLM\", \"XLM_USDT\"),\n (\"XRP\", \"XRP_USDT\")\n]\n\n# This is the quote currency used to invest. Choices are USDT and USDC. Take in mind that\n# some pairs might exist for one, but not the other. YOU CAN NOT USE BOTH SIMULTANEOUSLY!\nquote_currency = \"USDT\"\n#quote_currency = \"USDC\"\n\n# The minimum sell order worth. A sell order can not be executed if the USDT worth that we would\n# get from selling is below this value.\nmin_sell_order_worth = 0.5\n\n# The minimum sell order percentage. 
The sell order must be at least this percentage larger than\n# the target portfolio percentage for that coin to be sold.\nmin_sell_order_percentage = 3\n\n# The amount of USDT that will be invested per coin pair each iteration.\nbuy_order_value = 0.50\n\n# The rebalance interval in hours.\nrebalance_interval = 1\n\n# The investment interval in hours.\ninvestment_interval = 6","sub_path":"_config-example.py","file_name":"_config-example.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"523751634","text":"num_in = int(input())\n\n\ndef cal(num):\n Q = (num - 1) // 4\n R = (num - 1) % 4\n if Q % 2 == 0:\n return R + 1\n else:\n return 5 - R\n\n\nprint(cal(num_in))\n","sub_path":"algorithm/baekjoon/17363.py","file_name":"17363.py","file_ext":"py","file_size_in_byte":175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"198653157","text":"\"\"\"\nHandful utils that do not deserve a separate module.\n\"\"\"\nimport traceback\nfrom pathlib import Path\nfrom typing import Text, Any, Dict, Tuple, List, TYPE_CHECKING\n\nfrom fhdoc.settings import ASSETS_PATH\n\n\ndef make_title(file_stem):\n\t# type: (Text) -> Text\n\t\"\"\"\n\tWhilst this all seems well and good, it produces unreliable module names\n\tso I'm going to add an early return and add this note again inline\n\n\tConvert `pathlib.Path` part or any other string to a human-readable title.\n\tReplace underscores with spaces and capitalize result.\n\n\tExamples::\n\n\t\tmake_title(Path(\"my_module/my_path.py\").stem)\n\t\t\"My Path\"\n\n\t\tmake_title(\"my_title\")\n\t\t\"My Title\"\n\n\t\tmake_title(\"__init__.py\")\n\t\t\"Init Py\"\n\n\t\tmake_title(Path(\"my_module/__main__.py\").stem)\n\t\t\"Module\"\n\n\tArguments:\n\t\tfile_stem -- Stem from path.\n\n\tReturns:\n\t\tA human-readable title as a string.\n\t\"\"\"\n\t# Whilst this all seems well and good, it produces unreliable module names\n\t# so I'm going to add an early return\n\tif file_stem == \"__main__\":\n\t\treturn \"\\\\_\\\\_main\\\\_\\\\_\"\n\n\treturn file_stem\n\n\t# Old Code - This works as intended\n\t'''\n\tif file_stem == \"__main__\":\n\t\treturn \"Module\"\n\n\tparts = file_stem.replace(\".\", \"_\").split(\"_\")\n\tname_parts = [] # type: List[Text]\n\tfor part in parts:\n\t\tif not part:\n\t\t\tcontinue\n\t\tname_part = part.strip().capitalize()\n\t\tname_parts.append(name_part)\n\n\treturn \" \".join(name_parts)\n\t'''\n\n\ndef render_asset(name, target_path, format_dict):\n\t# type: (Text, Path, Dict[Text, Text]) -> None\n\t\"\"\"\n\tRender `assets/` file to `target_path`.\n\n\tArguments:\n\t\tname -- Asset file name.\n\t\ttarget_path -- Path of output file.\n\t\tformat_dict -- Format asset with values from the dict before writing.\n\t\"\"\"\n\tcontent = (Path(ASSETS_PATH) / name).read_text()\n\tcontent = content.format(**format_dict)\n\ttarget_path.write_text(content)\n\n\ndef extract_md_title(content):\n\t# type: (Text) -> Tuple[Text, Text]\n\t\"\"\"\n\tExtract title from the first line of content.\n\tIf title is present - return a title and a remnaing content.\n\tif not - return an empty title and untouched content.\n\n\tExamples::\n\n\t\textract_md_title('# Title\\\\ncontent')\n\t\t('Title', 'content')\n\n\t\textract_md_title('no title\\\\ncontent')\n\t\t('', 'no title\\\\ncontent')\n\n\tReturns:\n\t\tA tuple fo title and remaining content.\n\t\"\"\"\n\ttitle = \"\"\n\tif content.startswith(\"# \"):\n\t\tif 
\"\\n\" not in content:\n\t\t\tcontent = \"{}\\n\".format(content)\n\n\t\ttitle_line, content = content.split(\"\\n\", 1)\n\t\ttitle = title_line.split(\" \", 1)[-1]\n\n\t#return title, content\n\treturn \"\", content\n","sub_path":"fhdoc/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"44199187","text":"\"\"\"\n Program: ALBERT\n Module: translator.py\n\n This program is free software; you can redistribute it and/or\n modify it under the terms of the GNU General Public License\n version 2 as published by the Free Software Foundation.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\"\"\"\n__author__ = \"Ray Jackson\"\n__copyright__ = \"Copyright 2019, Ray Jackson\"\n__credits__ = []\n__license__ = \"GNU GPL\"\n__version__ = \"1.0.0\"\n__maintainer__ = \"Ray Jackson\"\n__email__ = \"\"\n__status__ = \"Production\"\n\nimport logging\nfrom copy import deepcopy\n\nimport numpy as np\n\nfrom constants import *\nfrom model_spec import REST_POTENTIAL, MAX_STRENGTH\n\n\nclass DataTranslator:\n\n def __init__(self, plot_request, model_data):\n self.figure_data, self.plot_request = self.split_request(deepcopy(plot_request))\n self.model_data = model_data\n self.sub_plots = []\n self.modules = set([])\n self.plot_data = []\n self.translate()\n\n @staticmethod\n def split_request(plot_request):\n fig_request = None\n plt_request = []\n for request in plot_request:\n if request[PLOT_TYPE] == FIGURE:\n fig_request = request\n else:\n plt_request.append(request)\n return fig_request, plt_request\n\n def translate(self):\n self.translate_figure()\n for item in self.plot_request:\n if item[PLOT_TYPE] == CELL_MAP:\n self.translate_cell_map(item)\n elif item[PLOT_TYPE] == HEATMAP:\n self.translate_heat_map(item)\n elif item[PLOT_TYPE] == LINES:\n self.translate_lines(item)\n else:\n logging.info(f\"Unrecognised plot type: {item['type']}\")\n\n def translate_figure(self):\n def item_key(item: dict):\n return item[MODULE]\n self.plot_request.sort(key=item_key)\n max_rows = 0\n max_cols = 0\n module_count = 0\n for item in self.plot_request:\n module_num = int(item[MODULE][-2:])\n module_count = max(module_count, module_num)\n max_rows = max(max_rows, item[SUBPLOT][0])\n max_cols = max(max_cols, item[SUBPLOT][1])\n # this requires plot_requests to be sorted in order of module keys\n item[SUBPLOT] = (item[SUBPLOT][0], item[SUBPLOT][1] + max_cols * (module_num - 1))\n self.sub_plots.append(item[SUBPLOT])\n self.figure_data[DIMENSIONS] = (max_rows, max_cols * module_count)\n self.figure_data[SUBPLOTS] = self.sub_plots\n self.figure_data[TIME_ALIVE_CYCLES] = self.model_data[TIME_ALIVE_CYCLES]\n\n def translate_cell_map(self, request):\n module_data = self.model_data[MODULES][request[MODULE]]\n dimensions = module_data[DIMENSIONS]\n request[X_LABEL] = 'Columns'\n request[Y_LABEL] = 'Rows'\n request[X_RANGE] = (- 0.5, dimensions[1] - 0.5)\n request[Y_RANGE] = (- 0.5, dimensions[0] - 0.5)\n request[TITLE] = module_data[DESCRIPTION] + f' {request[MODULE]}'\n request[CELLS] = module_data[CELLS]\n request[AXONS] = module_data[AXONS]\n request[DENDRITES] = module_data[DENDRITES]\n request[SPINES] = module_data[SPINES]\n request[SYNAPSES] = module_data[SYNAPSES]\n 
self.plot_data.append(request)\n\n def translate_heat_map(self, request):\n # adds: data['xy-data'], data['v-min'], data['v-max']\n module_data = self.model_data[MODULES][request[MODULE]]\n dimensions = module_data[DIMENSIONS]\n request[V_MIN] = -100\n request[V_MAX] = 20\n request[X_LABEL] = 'Columns'\n request[Y_LABEL] = 'Rows'\n request[X_RANGE] = (- 0.5, dimensions[1] - 0.5)\n request[Y_RANGE] = (- 0.5, dimensions[0] - 0.5)\n request[TITLE] = module_data[DESCRIPTION] + f' {request[MODULE]}'\n variable = request[VARIABLE]\n xy_data = np.full(dimensions, REST_POTENTIAL)\n for key, node in module_data[NODES].items():\n row = key[0]\n column = key[1]\n xy_data[row][column] = node[variable]\n request[XY_DATA] = xy_data\n self.plot_data.append(request)\n\n def translate_lines(self, request):\n variable = request[VARIABLE]\n items = request[ITEMS]\n if variable == POTENTIAL:\n request.update(\n {TITLE: f'Potentials (mV) for {items}\\n in module {request[MODULE]}',\n Y_LABEL: 'Potentials (mV)', X_LABEL: 'Time (sec)',\n Y_RANGE: [-150.0, 150.0], X_RANGE: [-50, 0], Y_BASE: REST_POTENTIAL})\n elif variable == LENGTH:\n request.update(\n {TITLE: f'Length (microns) for {items}\\n in module {request[MODULE]}',\n Y_LABEL: 'Length (microns)', X_LABEL: 'Time (sec)',\n Y_RANGE: [0.0, 1.5], X_RANGE: [-50, 0], Y_BASE: REST_POTENTIAL})\n elif variable == STRENGTH:\n request.update(\n {TITLE: f'Strength (ion channels) for {items}\\n in module {request[MODULE]}',\n Y_LABEL: 'Strength (ion channels)', X_LABEL: 'Time (sec)',\n Y_RANGE: [0.0, MAX_STRENGTH * 1.1], X_RANGE: [-50, 0], Y_BASE: 0})\n else:\n request.update(\n {TITLE: f'Unrecognised variable \"{variable}\"\" requested',\n Y_LABEL: 'None', X_LABEL: 'None',\n Y_RANGE: [0, 1], X_RANGE: [-50, 0], Y_BASE: 0})\n # copy data values to the expanded request\n data = self.model_data[MODULES][request[MODULE]]\n item_keys = request[ITEM_KEYS]\n y_data = {}\n for key in item_keys:\n y_data[key] = data[items][key][variable]\n\n request[Y_DATA] = y_data\n self.plot_data.append(request)\n","sub_path":"src/translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":6004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"632753903","text":"#-*- coding:UTF-8 -*-\nimport utils.adsh_loss as al\nimport utils.data_processing as dp\nimport utils.cnn_model as cnn_model\nimport utils.subset_sampler as subsetsampler\nimport utils.calc_hr as calc_hr\n\nimport pickle\nimport os\nimport argparse\nimport logging\nimport torch\nimport time\n\nimport numpy as np\nimport torch.optim as optim\nimport torchvision.transforms as transforms\nimport torch.autograd as autograd\nimport torch.nn as nn\n\nfrom datetime import datetime\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\n\n#cy dg\nparser = argparse.ArgumentParser(description=\"ADSH demo\")\n#parser.add_argument('--bits', default='12,24,32,48', type=str,\n# help='binary code length (default: 12,24,32,48)')\nparser.add_argument(\n '--bits',\n default='12',\n type=str,\n help='binary code length (default: 12,24,32,48)')\nparser.add_argument(\n '--gpu', default='1', type=str, help='selected gpu (default: 1)')\nparser.add_argument(\n '--arch',\n default='resnet50',\n type=str,\n help='model name (default: resnet50)')\nparser.add_argument(\n '--max-iter', default=50, type=int, help='maximum iteration (default: 50)')\nparser.add_argument(\n '--epochs', default=10, type=int, help='number of epochs (default: 3)')\nparser.add_argument(\n 
'--batch-size', default=32, type=int, help='batch size (default: 64)')\n\nparser.add_argument(\n '--num-samples',\n default=2000,\n type=int,\n help='hyper-parameter: number of samples (default: 2000)')\nparser.add_argument(\n '--num-triplet-samples',\n default=50,\n type=int,\n help='hyper-parameter: number of triplet samples (default: 100)')\nparser.add_argument(\n '--alpha',\n default=1,\n type=int,\n help='hyper-parameter: alpha (default: 1)')\nparser.add_argument(\n '--gamma',\n default=200,\n type=int,\n help='hyper-parameter: gamma (default: 200)')\nparser.add_argument(\n '--lamda',\n default=1,\n type=int,\n help='hyper-parameter: lamda (default: 0)')\nparser.add_argument(\n '--learning-rate',\n default=0.01, \n type=float,\n help='hyper-parameter: learning rate (default: 10**-3)')\nparser.add_argument(\n '--momentum',\n default = 0,\n type=float,\n help='hyper-parameter: momentum (default: 0)')\nparser.add_argument(\n '--num-label',\n default=10,\n type=int,\n help='hyper-parameter: number of labels (default: 10)')\nparser.add_argument(\n '--model-save-path',\n default='/home/cy/ADSH_pytorch/ADSH_pytorch/model-10-labels',\n type=str,\n help=\n 'model save path (default: /home/cy/ADSH_pytorch/ADSH_pytorch/model-10-labels-lr-0.1)'\n)\n\nparser.add_argument(\n '--num-testing',\n default=1000,\n type=int,\n help='hyper-parameter: number of test from train data (default: 1000)')\n\n\ndef _logging():\n os.mkdir(logdir)\n global logger\n logfile = os.path.join(logdir, 'log.log')\n logger = logging.getLogger('')\n logger.setLevel(logging.INFO)\n fh = logging.FileHandler(logfile)\n fh.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n\n _format = logging.Formatter(\"%(name)-4s: %(levelname)-4s: %(message)s\")\n fh.setFormatter(_format)\n ch.setFormatter(_format)\n\n logger.addHandler(fh)\n logger.addHandler(ch)\n return\n\n\ndef _record():\n global record\n record = {}\n record['train loss'] = []\n record['iter time'] = []\n record['param'] = {}\n return\n\n\ndef _save_record(record, filename):\n with open(filename, 'wb') as fp:\n pickle.dump(record, fp)\n return\n\n\ndef encoding_onehot(target, nclasses=10):\n target_onehot = torch.FloatTensor(target.size(0), nclasses)\n target_onehot.zero_()\n target_onehot.scatter_(1, target.view(-1, 1), 1)\n return target_onehot\n\n\ndef _dataset(num_label):\n normalize = transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n transformations = transforms.Compose([\n transforms.Scale(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(), normalize\n ])\n\n dset_database = dp.DatasetProcessingNUS_WIDE(\n '/home/cy/NUSWIDE', 'cy_TrainImagelist.txt', 'cy_Train_Tags1k.txt',\n num_label, transformations)\n\n dset_test = dp.DatasetProcessingNUS_WIDE(\n '/home/cy/NUSWIDE', 'cy_TestImagelist.txt', 'cy_Test_Tags1k.txt',\n num_label, transformations)\n\n num_database, num_test = len(dset_database), len(dset_test)\n\n index_database, index_test = dset_database.pic_index_has_label(),dset_test.pic_index_has_label()\n\n databaselabels = np.loadtxt(\n '/home/cy/NUSWIDE/cy_Train_Tags1k.txt', dtype=np.int64)\n\n databaselabels = databaselabels[index_database, :num_label]\n\n testlabels = np.loadtxt(\n '/home/cy/NUSWIDE/cy_Test_Tags1k.txt', dtype=np.int64)\n testlabels = testlabels[index_test, :num_label]\n\n databaselabels = torch.from_numpy(databaselabels)\n testlabels = torch.from_numpy(testlabels)\n\n dsets = (dset_database, dset_test)\n nums = (num_database, num_test)\n labels = (databaselabels, 
testlabels)\n\n return nums, dsets, labels\n\n\n#return S:query_num*train_num\ndef calc_sim(database_label, train_label, num_label):\n # entries greater than 0 are set to 1, cy\n S = (database_label.mm(train_label.t()) > 0).type(torch.FloatTensor)\n '''\n soft constraint\n '''\n\n r = S.sum() / (num_label - S).sum()\n S = S * (1 + r) - r\n\n return S\n\n\n# U has num_samples rows on each call; this function uses numpy operations and computes the triplet loss within the query data only.\ndef calc_loss_query(V, U, S, S_query, code_length, select_index, alpha, gamma, lamda):\n num_database = V.shape[0]\n square_loss = (U.dot(V.transpose()) - code_length * S)**2\n U_dot_U = U.dot(U.transpose())\n V_omega = V[select_index, :]\n quantization_loss = (U - V_omega)**2\n loss = (alpha * square_loss.sum() + gamma * quantization_loss.sum()) / (\n opt.num_samples * num_database)\n\n sum_triplet_loss = 0\n for i in range(opt.num_samples):\n # nonzero() returns a tuple\n _index_unsim = (S_query[i] < 0).nonzero()\n select_index_unsim = list(\n np.random.permutation(list(\n _index_unsim[0])))[0:opt.num_triplet_samples]\n\n _index_sim = (S_query[i] > 0).nonzero()\n select_index_sim = list(np.random.permutation(list(\n _index_sim[0])))[0:opt.num_triplet_samples]\n\n _temp = np.maximum(\n (U_dot_U[i, select_index_unsim] - U_dot_U[i, select_index_sim]) /\n 2.0 + code_length / 2.0, 0)\n\n sum_triplet_loss += _temp.sum()\n\n loss += lamda * sum_triplet_loss / (\n opt.num_samples * num_database)\n\n return square_loss.sum() / (\n opt.num_samples * num_database), quantization_loss.sum() / (\n opt.num_samples), sum_triplet_loss / (\n opt.num_samples * opt.num_triplet_samples), loss\n\n\n# U has num_samples rows on each call; this function uses numpy operations and computes the triplet loss of the query data against the database.\ndef calc_loss(V, U, S, code_length, select_index, alpha, gamma, lamda):\n num_database = V.shape[0]\n square_loss = (U.dot(V.transpose()) - code_length * S)**2\n U_dot_V = U.dot(V.transpose())\n V_omega = V[select_index, :]\n quantization_loss = (U - V_omega)**2\n loss = (alpha * square_loss.sum() + gamma * quantization_loss.sum()) / (\n opt.num_samples * num_database)\n\n sum_triplet_loss = 0\n\n for i in range(opt.num_samples):\n # nonzero() returns a tuple\n _index_unsim = (S[i] < 0).nonzero()\n select_index_unsim = list(np.random.permutation(\n list(_index_unsim[0])))[0:opt.num_triplet_samples]\n\n _index_sim = (S[i] > 0).nonzero()\n\n select_index_sim = list(np.random.permutation(\n list(_index_sim[0])))[0:opt.num_triplet_samples]\n\n _temp=np.maximum(( U_dot_V[i,select_index_unsim] - U_dot_V[i,select_index_sim]) / 2.0 +\n code_length / 2.0, 0)\n\n sum_triplet_loss += _temp.sum()\n\n loss+=lamda*sum_triplet_loss/(opt.num_samples*opt.num_triplet_samples)\n\n return square_loss.sum() / (\n opt.num_samples * num_database), quantization_loss.sum() / (\n opt.num_samples), sum_triplet_loss / (\n opt.num_samples * opt.num_triplet_samples), loss\n\n\ndef encode(model, data_loader, total_num_data, num_data, bit):\n B = np.zeros([num_data, bit], dtype=np.float32)\n for iter, data in enumerate(data_loader, 0):\n if iter == num_data:\n break\n #print \"now on iteration\", iter\n data_input, _, data_ind = data\n data_input = Variable(data_input.cuda())\n output = model(data_input)\n B[data_ind.numpy(), :] = torch.sign(output.cpu().data).numpy()\n #print \"data_ind.numpy\",data_ind.numpy()\n\n return B\n\n\ndef adjusting_learning_rate(optimizer, iter):\n update_list = [10, 20, 30, 40, 50]\n if iter in update_list:\n for param_group in optimizer.param_groups:\n param_group['lr'] = param_group['lr'] / 10\n\n\ndef adsh_algo(code_length):\n os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu\n torch.manual_seed(0)\n torch.cuda.manual_seed(0)\n '''\n parameter setting\n '''\n max_iter = opt.max_iter\n epochs = opt.epochs\n batch_size = opt.batch_size\n learning_rate = opt.learning_rate\n weight_decay = 5 * 10**-4\n num_samples = opt.num_samples\n gamma = opt.gamma\n lamda = opt.lamda\n alpha = opt.alpha\n num_label = opt.num_label\n model_save_path = opt.model_save_path\n num_testing = opt.num_testing\n momentum = opt.momentum\n\n record['param']['opt'] = opt\n record['param']['description'] = '[Comment: learning rate decay]'\n logger.info(opt)\n logger.info(code_length)\n logger.info(record['param']['description'])\n '''\n dataset preprocessing\n '''\n nums, dsets, labels = _dataset(num_label)\n num_database, num_test = nums\n dset_database, dset_test = dsets\n database_labels, test_labels = labels\n\n #model construction\n\n model = cnn_model.CNNNet(opt.arch, code_length)\n model.cuda()\n adsh_loss = al.ADSHLoss(alpha, gamma, lamda, code_length, num_database)\n # weight_decay: regularization of the network parameters\n optimizer = optim.SGD(\n model.parameters(), lr=learning_rate, weight_decay=weight_decay, momentum=momentum)\n\n V = np.zeros((num_database, code_length))\n\n model.train()\n\n # when the input size stays fixed across training steps, enabling cudnn benchmark lets cudnn pick the fastest algorithms for the current configuration, which speeds up training; useful here\n torch.backends.cudnn.benchmark = True\n\n # 1. each iter randomly picks num_samples samples, which are used to train the hash (code generation) function.\n # 2. run several epochs; each epoch performs a number of batch-sized steps\n for iter in range(max_iter):\n iter_time = time.time()\n\n #sampling and construct similarity matrix\n\n select_index = list(np.random.permutation(\n range(num_database)))[0:num_samples]\n\n _sampler = subsetsampler.SubsetSampler(select_index)\n\n # each step the sampler (here: the num_samples given indices) yields batch_size items\n trainloader = DataLoader(\n dset_database,\n batch_size=batch_size,\n sampler=_sampler,\n shuffle=False,\n num_workers=4)\n\n #learning deep neural network: feature learning\n\n sample_label = database_labels.index_select(\n 0, torch.from_numpy(np.array(select_index)))\n\n #cy\n Sim = calc_sim(sample_label, database_labels, 1)\n U = np.zeros((num_samples, code_length), dtype=np.float)\n for epoch in range(epochs):\n for iteration, (train_input, train_label,\n batch_ind) in enumerate(trainloader):\n batch_size_ = train_label.size(0)\n #print \"iter\",iteration,\"batch_size\",batch_size\n\n u_ind = np.linspace(\n iteration * batch_size,\n np.min((num_samples, (iteration + 1) * batch_size)) - 1,\n batch_size_,\n dtype=int)\n # this is one batch\n train_input = Variable(train_input.cuda())\n\n output = model(train_input)\n S = Sim.index_select(0, torch.from_numpy(u_ind))\n #cy\n S_query = calc_sim(sample_label[u_ind, :],\n sample_label[u_ind, :], 1)\n U[u_ind, :] = output.cpu().data.numpy()\n\n model.zero_grad()\n loss = adsh_loss(output, V, S, S_query,\n V[batch_ind.cpu().numpy(), :], 1)\n loss.backward()\n optimizer.step()\n\n #print \"optimizer args:\", optimizer.state_dict()\n\n adjusting_learning_rate(optimizer, iter)\n\n #learning binary codes: discrete coding\n\n\n barU = np.zeros((num_database, code_length))\n barU[select_index, :] = U\n Q = -2 * code_length * Sim.cpu().numpy().transpose().dot(\n U) - 2 * gamma * barU\n for k in range(code_length):\n sel_ind = np.setdiff1d([ii for ii in range(code_length)], k)\n V_ = V[:, sel_ind]\n Uk = U[:, k]\n U_ = U[:, sel_ind]\n V[:, k] = -np.sign(Q[:, k] + 2 * V_.dot(U_.transpose().dot(Uk)))\n\n iter_time = time.time() - iter_time\n S_query=calc_sim(sample_label,sample_label,1)\n\n square_loss_, quanty_loss_, triplet_loss_, loss_ = calc_loss(\n V, U,\n Sim.cpu().numpy(),\n code_length, select_index, alpha, gamma,\n lamda)\n logger.info(\n '[Iteration: %3d/%3d][square Loss: %.4f][quanty Loss: %.4f][triplet Loss: %.4f][train Loss: %.4f]',\n iter, max_iter, square_loss_, quanty_loss_, triplet_loss_, loss_)\n record['train loss'].append(loss_)\n record['iter time'].append(iter_time)\n\n #training procedure finishes, evaluation\n\n torch.save(model, model_save_path)\n print (\"model saved!\")\n\n model.eval()\n\n testloader = DataLoader(\n dset_test, batch_size=1, shuffle=False, num_workers=4)\n\n qB = encode(model, testloader, num_test, num_testing, code_length)\n rB = V\n # evaluate the ranking quality (mAP)\n\n map = calc_hr.calc_map(qB, rB,\n database_labels.numpy()[0:num_testing],\n database_labels.numpy())\n\n logger.info('[Evaluation: mAP: %.4f]', map)\n record['rB'] = rB\n record['qB'] = qB\n record['map'] = map\n filename = os.path.join(logdir, str(code_length) + 'bits-record.pkl')\n\n _save_record(record, filename)\n\n\ndef adsh_eval(code_length):\n os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu\n model_save_path = opt.model_save_path\n num_label = opt.num_label\n num_testing = opt.num_testing\n\n model = torch.load(model_save_path)\n\n inf=pickle.load(open('./log/log-ADSH-cifar10-18-08-02-09-23-57/12bits-record.pkl'))\n V=inf['rB']\n\n model.eval()\n\n '''\n dataset preprocessing\n '''\n\n nums, dsets, labels = _dataset(num_label)\n num_database, num_test = nums\n dset_database, dset_test = dsets\n database_labels, test_labels = labels\n\n testloader = DataLoader(\n dset_test, batch_size=1, shuffle=False, num_workers=4)\n\n #print (\"num test\",num_test)\n qB = encode(model, testloader, num_test, num_testing, code_length)\n\n #qB = V[0:1000]\n rB = V\n # evaluate the ranking quality (mAP)\n\n map = calc_hr.calc_map(qB, rB,\n test_labels.numpy(),\n database_labels.numpy())\n\n print (map)\n #logger.info('[Evaluation: mAP: %.4f]', map)\n\n\nif __name__ == \"__main__\":\n global opt, logdir\n opt = parser.parse_args()\n logdir = '-'.join(['log/log-ADSH-cifar10', datetime.now().strftime(\"%y-%m-%d-%H-%M-%S\")])\n _logging()\n _record()\n\n bits = [int(bit) for bit in opt.bits.split(',')]\n for bit in bits:\n #adsh_eval(bit)\n adsh_algo(bit)\n","sub_path":"ADSH_pytorch/ADSH_NUSWIDE.py","file_name":"ADSH_NUSWIDE.py","file_ext":"py","file_size_in_byte":15909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"614618595","text":"import helper_function.ask_y_n_statement as ask\nimport add_edit.add_new as add_new\nimport add_edit.edit_record as edit_record\n \n\t\ndef update_single(conn, cursor, table, column, file_number, var):\n # update a single column in a sql db. Key is file_number.\n sql_update = \"UPDATE \" + table + \" SET \" + column + \"= ? WHERE file_number = '\" + file_number + \"'\"\n cursor.execute(sql_update, [var])\n conn.commit()\n\n\ndef insert(conn, cursor, table, columns, data):\n # insert data in multiple cols in a sql db. adds a new row\n col_number = len(data)\n place_holder = [\"?\"] * col_number\n place_str = \",\".join(place_holder)\n sql_insert = \"INSERT INTO \" + table + \"(\" + columns + \") VALUES (\" + place_str + \")\"\n cursor.execute(sql_insert, data)\n conn.commit()\n\ndef insert_file_number (conn, cursor, file_number):\n # insert a new row containing only the file_number\n sql_insert = \"INSERT INTO Patient_Information_History(file_number) VALUES (?)\"\n cursor.execute(sql_insert, file_number)\n conn.commit()\n\ndef update_multiple(conn, cursor, table, columns, file_number, data):\n # update multiple columns in a sql db. 
Key is file_number.\n col_number = len(data)\n for index in range(0, col_number):\n sql_update = \"UPDATE \" + table + \" SET \" + columns[index] + \"= ? WHERE file_number = '\" + file_number + \"'\"\n var = data[index]\n cursor.execute(sql_update, [var])\n conn.commit()\n\n\ndef add_columns(cursor, table, columns):\n col_number = len(columns)\n for index in range(0, col_number):\n sql_add = \"ALTER TABLE \" + table + \" ADD \" + columns[index]\n cursor.execute(sql_add)\n\ndef review_input (file_number, columns, data):\n col_number = len (data)\n print (\"Entries for database are as follows : \")\n for index in range (0, col_number):\n print (columns[index] +\": \" + data[index])\n ans = ask.ask_y_n(\"Are entries for file \"+ file_number+ \" correct ?\", True, False)\n return ans\n\ndef review_data (conn, cursor, table, file_number, col_list):\n sql_statement = ('SELECT '+ \", \".join(col_list) +' FROM '+ table + \" WHERE file_number = '\" +file_number+\"'\")\n data = cursor.execute(sql_statement)\n data_list = data.fetchall()\n data_list = list(data_list[0])\n col_number = len(col_list)\n if data_list== [None]*len(data_list):\n print(\"This section of the database has not been entered\")\n enter = ask.ask_y_n(\"Do you want to enter now\")\n return enter\n if None in set(data_list):\n print(\"Some entries are missing from the database: \")\n for index in range (0, col_number):\n print (col_list[index]+ \" : \" + str(data_list[index]))\n enter = ask.ask_option(\"Do you want to proceed?\", [\"Edit all\", \"Add new data only\"])\n if enter == \"Edit all\":\n return True\n else:\n edit_few(conn, cursor, table, col_list, file_number, data_list)\n else:\n print(\"Entries present in database are as follows : \")\n for index in range (0, col_number):\n print (col_list[index]+ \" : \" + str(data_list[index]))\n enter = ask.ask_option(\"Do you want to\", [\"Edit all\", \"Edit some entries\", \"Edit None\"])\n if enter == \"Edit some entries\":\n for index in range(0, col_number):\n print(col_list[index] + \" : \" + str(data_list[index]))\n edit = ask.ask_y_n(\"Edit\")\n if edit:\n data = input(\"Data for \" + col_list[index] + \": \")\n update_single(conn, cursor, table, col_list[index], file_number, data)\n return False\n elif enter == \"Edit all\":\n return True\n else:\n return False\n\ndef edit_few(conn, cursor, table, col_list, file_number, data_list):\n col_number = len (col_list)\n for index in range (0, col_number):\n if data_list[index] == None:\n data = input (\"Data for \"+col_list[index]+\": \")\n update_single(conn, cursor, table, col_list[index], file_number, data)\n return False\n\n\ndef check_file(conn, cursor, table, file_number, user_name, folders):\n sql_statement = \"SELECT rowid FROM \" + table + \" WHERE file_number = ?\"\n cursor.execute(sql_statement, (file_number, ))\n data = cursor.fetchall()\n if len(data) == 0:\n if table != \"Follow_up_Data\":\n cursor.execute(\"INSERT INTO \" + table + \"(file_number) VALUES ('\" + file_number + \"')\")\n print(file_number + \" does not exist in table \" + table + \". 
Enter new record\")\n add_new.add_new(conn, cursor, file_number, table, user_name,folders)\n else:\n todo = ask.ask_option(file_number + \" already exists in table \" + table + \".\",\n [\"Edit record\", \"Add new record for same file number\", \"Edit None\"])\n if todo == \"Edit record\":\n edit_record.edit_record(conn, cursor, file_number, table, user_name, folders)\n elif todo == \"Add new record for same file number\":\n print(\"Add additional record module TBD\")\n ask_table = ask.ask_y_n(\"Add another table?\")\n return ask_table\n\ndef review_df(df):\n print(df.to_string())\n check = ask.ask_y_n(\"Is data entered correct?\")\n return check\n\n\ndef table_check(cursor, table_name):\n x = cursor.execute(\"SELECT count(*) FROM sqlite_master WHERE type='table' AND name='\" + table_name + \"'\")\n [table_exists] = cursor.fetchall()\n test = list(table_exists)[0]\n return test\n\n\ndef view_multiple(conn, table, col_list, file_number):\n import pandas as pd\n from helper_function.ask_y_n_statement import ask_option\n sql_statement = ('SELECT '+ \", \".join(col_list) +' FROM '+ table + \" WHERE file_number = '\" +file_number+\"'\")\n df = pd.read_sql(sql_statement, conn)\n print_df(df)\n enter = ask_option(\"Do you want to add or edit data\", [\"Add data\", 'Edit data', 'Do not add or edit'])\n return enter\n\n\ndef delete_multiple(cursor, table, file_number):\n sql_statement = \"DELETE FROM \" + table + \" WHERE file_number = '\" + file_number + \"'\"\n cursor.execute(sql_statement)\n\n\ndef delete_rows(cursor, table, col_name, col_data):\n sql_statement = \"DELETE FROM \" + table + \" WHERE \"+col_name+\" = '\" + col_data + \"'\"\n cursor.execute(sql_statement)\n\ndef review_df_row(df):\n import helper_function.ask_y_n_statement as ask\n check_row = len(df)-1\n print(df.iloc[check_row].to_string())\n check = ask.ask_y_n(\"Is data entered correct?\")\n if check:\n return check, df\n else:\n df = df.drop(df.index[check_row])\n return check, df\n\ndef get_sql_data(file_number, conn, module, table):\n import helper_function.table_dicts as table_dicts\n import pandas as pd\n columns = []\n cols = table_dicts.db_dict(table, module)\n columns = columns + cols\n col_list = table_dicts.create_col_list(columns)\n sql_statement = ('SELECT ' + \", \".join(col_list) + \" FROM '\" + str(table) + \"' WHERE file_number = '\" + file_number + \"'\")\n df = pd.read_sql(sql_statement, conn)\n return df\n\ndef get_value(col_name, table, file_number, cursor, error_statement):\n try:\n sql_statement = \"SELECT \"+col_name+\" FROM \" +table+\" WHERE file_number = '\" + file_number + \"'\"\n cursor.execute(sql_statement)\n value_ = cursor.fetchall()\n value = value_[0][0]\n except:\n value = input(error_statement)\n return value\n\ndef print_df(df):\n rows = (df.shape)[0]\n for row in range(0, rows):\n print(df.iloc[row].to_string() + '\\n')\n\ndef edit_table(df, pk_col, df_col):\n import helper_function.ask_y_n_statement as ask\n rows = (df.shape)[0]\n for row in range(0,rows):\n print(df.iloc[row].to_string()+'\\n')\n to_correct = ask.ask_y_n(\"Are entries correct?\")\n if not to_correct:\n to_correct = ask.ask_y_n(\"Re-enter entire table?\")\n if to_correct:\n return to_correct, df\n else:\n change_row = True\n while change_row:\n pk_list = list(df[pk_col])\n print(pk_list)\n pk = input(\"Enter \" + pk_col + \" to change: \")\n index = pk_list.index(id)\n to_do = True\n while to_do:\n print(df.loc[index, :])\n col_change = ask.ask_option(\"Name of column to change\", df_col)\n old_val = df.loc[index, 
col_change]\n print(old_val + '\\n')\n new_val = input(\"Enter correct value for \" + col_change + ' for ' + pk + \": \")\n df.loc[index, col_change] = new_val\n print(df.iloc[index].to_string() + '\\n')\n to_do = ask.ask_y_n(\"Make more changes to \" + pk_col + ' ' + pk + '?')\n print_df(df)\n change_row = ask.ask_y_n(\"Change another row?\")\n to_correct = False\n return to_correct, df\n\n\ndef get_block_id_multiple (col_name, table, file_number, block_type, cursor):\n #mutliple results for block_id with multiple entries and 1 block_type\n try:\n sql_statement = \"SELECT \"+ col_name +\" FROM \" +table+\" WHERE file_number = '\" + file_number + \"' AND block_type = '\"\\\n + block_type + \"'\"\n cursor.execute(sql_statement)\n values = cursor.fetchall()\n value = [value[0] for value in values]\n except:\n value = 'NA'\n return value\n\ndef last_update():\n from datetime import datetime\n last_update = datetime.now().strftime(\"%Y-%b-%d %H:%M\")\n return last_update","sub_path":"sql/add_update_sql_changed.py","file_name":"add_update_sql_changed.py","file_ext":"py","file_size_in_byte":9516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"305479014","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 28 14:43:54 2019\n\nRead and process file\nFigure 23.12; Introduction to Computation and Programming Using Python\n\n@author: kemcrimmins\n\"\"\"\n\nimport pylab\n\ndef readMammalData(fName):\n dataFile = open(fName, 'r')\n numFeatures = 0\n #Process lines at top of file\n for line in dataFile: #Find number of features\n if line[0:6] == '#Label': #indicates end of features\n break\n if line[0:5] != '#Name':\n numFeatures += 1\n featureVals = [] #I don't think this line is necessary...\n \n #Produce featureVals, speciesNames, and labelLists\n featureVals, speciesNames, labelList = [], [], []\n for i in range(numFeatures):\n featureVals.append([])\n \n #Continue processing line in files, starting after comments\n for line in dataFile:\n #remove newline, then split\n dataLine = line[:-1].split(',')\n speciesNames.append(dataLine[0])\n classLabel = dataLine[-1]\n labelList.append(classLabel)\n for i in range(numFeatures):\n featureVals[i].append(float(dataLine[i+1]))\n \n #Use featureVals to build list containing the feature vectors\n #for each mammal\n featureVectorList = [] #print(len(speciesNames)) #testing; speciesName has only one element!\n for mammal in range(len(speciesNames)):\n featureVector = []\n for feature in range(numFeatures):\n featureVector.append(featureVals[feature][mammal])\n featureVectorList.append(featureVector)\n return featureVectorList, labelList, speciesNames\n \ndef buildMammalExamples(featureList, labelList, speciesNames):\n examples = []\n for i in range(len(speciesNames)):\n features = pylab.array(featureList[i])\n example = Example(speciesNames[i], features, labelList[i])\n examples.append(example)\n return examples\n ","sub_path":"clustering/readAndProcessFile.py","file_name":"readAndProcessFile.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"556050703","text":"import json\nimport requests\n\nfrom flask import Flask, request, jsonify\n\napp = Flask(__name__)\nurl = ('https://spoqa.slack.com/services/hooks/incoming-webhook'\n '?token=EqCiU0T2MCimJLsn30j7JUPF')\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n data = json.loads(request.data)\n if 
request.headers['X-GitHub-Event'] == 'issue_comment':\n for label in data['issue']['labels']:\n payload = {\n \"username\": \"github\",\n \"icon_emoji\": \":octocat:\",\n \"channel\": u\"#{0}\".format(label['name']),\n \"text\": u\"#{0} @{1}: {2}\\n<{3}>\".format(\n data['issue']['number'],\n data['comment']['user']['login'],\n data['comment']['body'],\n data['comment']['html_url']\n )\n }\n r = requests.post(url, data=json.dumps(payload))\n if request.headers['X-GitHub-Event'] == 'issues':\n for label in data['issue']['labels']:\n payload = {\n \"username\": \"github\",\n \"icon_emoji\": \":octocat:\",\n \"channel\": u\"#{0}\".format(label['name']),\n \"text\": u\"#{0} {1} by @{2}\\n{3}\\n<{4}>\".format(\n data['issue']['number'],\n data['issue']['title'],\n data['issue']['user']['login'],\n data['issue']['body'],\n data['issue']['html_url']\n )\n }\n r = requests.post(url, data=json.dumps(payload))\n return jsonify(result='success')\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"286273367","text":"from bo.Group import Group\nfrom db.Mapper import Mapper\n\n\nclass GroupMapper(Mapper):\n \"\"\"Mapper class that maps Group objects onto the relational DB.\n The mapping is bidirectional, i.e. objects can be converted\n into DB structures and DB structures into objects.\n \"\"\"\n def __init__(self):\n super().__init__()\n\n def find_all(self):\n \"\"\"Read out all existing groups\n :return A collection of all Group objects.\n \"\"\"\n cursor = self._connection.cursor()\n command = \"SELECT * FROM holma.group\"\n cursor.execute(command)\n tuples = cursor.fetchall()\n\n result = Group.from_tuples(tuples)\n\n self._connection.commit()\n cursor.close()\n\n return result\n\n def find_by_id(self, group_id):\n \"\"\"Unique retrieval of a group by ID\n :param group_id:\n :return The Group object matching the given ID, or None\n if no DB tuple exists.\n \"\"\"\n cursor = self._connection.cursor()\n command = \"SELECT * FROM holma.group \" \\\n \"WHERE group_id={}\".format(group_id)\n cursor.execute(command)\n tuples = cursor.fetchall()\n\n result = Group.from_tuples(tuples)\n\n self._connection.commit()\n cursor.close()\n\n if len(result) == 0:\n return None\n return result[0]\n\n def find_by_name(self, name):\n \"\"\"Retrieve groups by name\n :param name:\n :return A collection of Group objects.\n \"\"\"\n cursor = self._connection.cursor()\n command = \"SELECT * FROM holma.group WHERE name LIKE '{}' \" \\\n \"ORDER BY name\".format(name)\n cursor.execute(command)\n tuples = cursor.fetchall()\n\n result = Group.from_tuples(tuples)\n\n self._connection.commit()\n cursor.close()\n\n return result\n\n def find_by_owner(self, user_id):\n \"\"\"Retrieve groups via the foreign key (user_id) of the given owner\n :param user_id:\n :return A collection of Group objects.\n \"\"\"\n cursor = self._connection.cursor()\n command = \"SELECT * FROM holma.group WHERE owner={}\".format(user_id)\n cursor.execute(command)\n tuples = cursor.fetchall()\n\n result = Group.from_tuples(tuples)\n\n self._connection.commit()\n cursor.close()\n\n return result\n\n def insert(self, group):\n \"\"\"Insert a Group object\n\n lastrowid returns the value generated for an AUTO_INCREMENT\n column by the previous INSERT\n :param group:\n :return the Group object that was passed in, but with the correct ID\n \"\"\"\n cursor = self._connection.cursor()\n command = \"INSERT INTO holma.group (group_id, name, creation_date, \" \\\n \"owner, last_updated) VALUES (%s, %s, %s, %s, %s)\"\n data = (group.get_id(),\n group.get_name(),\n group.get_creation_date(),\n group.get_owner(),\n group.get_last_updated())\n cursor.execute(command, data)\n self._connection.commit()\n cursor.close()\n\n group.set_id(cursor.lastrowid)\n return group\n\n def update(self, group):\n \"\"\"Repeated writing / updating of a Group object\n :param group:\n :return the updated Group object\n \"\"\"\n cursor = self._connection.cursor()\n command = \"UPDATE holma.group SET name=%s, owner=%s, \" \\\n \"last_updated=%s WHERE group_id=%s\"\n data = (group.get_name(),\n group.get_owner(),\n group.get_last_updated(),\n group.get_id())\n cursor.execute(command, data)\n\n self._connection.commit()\n cursor.close()\n\n return group\n\n def delete(self, group):\n \"\"\"Delete the data of a Group object from the database\n :param group:\n \"\"\"\n cursor = self._connection.cursor()\n command = \"DELETE FROM holma.group \" \\\n \"WHERE group_id={}\".format(group.get_id())\n cursor.execute(command)\n\n self._connection.commit()\n cursor.close()\n\n def delete_owner(self, user, group=None):\n \"\"\"\n Delete the owner of a group from the database\n\n Two cases are distinguished: deleting the owner from all groups\n (when the user is deleted) and deleting the owner from the given group.\n :param user:\n :param group:\n \"\"\"\n cursor = self._connection.cursor()\n if group is None:\n command = \"UPDATE holma.group SET owner= null WHERE owner = {}\"\\\n .format(user.get_id())\n else:\n command = \"UPDATE holma.group SET owner= null WHERE owner = {} \" \\\n \"AND group_id= {}\".format(user.get_id(), group.get_id())\n cursor.execute(command)\n\n self._connection.commit()\n cursor.close()\n\n\nif (__name__ == \"__main__\"):\n with GroupMapper() as mapper:\n print(\"All groups in database:\")\n result = mapper.find_all()\n for group in result:\n print(group)\n\n print(\"All groups owned by User #28:\")\n result = mapper.find_by_owner(28)\n for group in result:\n print(group)\n","sub_path":"src/db/GroupMapper.py","file_name":"GroupMapper.py","file_ext":"py","file_size_in_byte":5362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"385199766","text":"import sys\nimport unittest\nfrom io import StringIO\n\nfrom concourse import common\n\n\nclass TestCommon(unittest.TestCase):\n standard_payload = (\n \"\"\"\n {\n \"source\": {\n \"apiKey\": \"apiKey123\",\n \"secretKey\": \"secretKey321\"\n },\n \"version\": {\n \"ref\": \"version-v1-dev\"\n }\n }\n \"\"\")\n\n def setUp(self):\n self.common_instance = common.Common()\n\n def test_getPayload(self):\n put_stdin(self.standard_payload)\n self.common_instance.load_payload()\n result = self.common_instance.get_payload()\n 
self.assertEqual(result['source']['apiKey'], \"apiKey123\")\n self.assertEqual(result['source']['secretKey'], \"secretKey321\")\n self.assertEqual(result['version']['ref'], \"version-v1-dev\")\n\n def test_getApiKey(self):\n put_stdin(self.standard_payload)\n self.common_instance.load_payload()\n api_key = self.common_instance.get_api_key()\n self.assertEqual(api_key, \"apiKey123\")\n\n def test_getSecretKey(self):\n put_stdin(self.standard_payload)\n self.common_instance.load_payload()\n secret_key = self.common_instance.get_secret()\n self.assertEqual(secret_key, \"secretKey321\")\n\n\ndef put_stdin(content):\n sys.stdin = StringIO(content)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"opt/resource/concourse/test_common.py","file_name":"test_common.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"643629924","text":"__author__ = 'Tony'\n#coding=utf-8\n\n\nimport re\nimport urllib\nimport urllib2\nfrom bs4 import BeautifulSoup\nimport os\nimport glob\nimport eyed3\n\n\n# =========================================================================\n\npath = 'F:\\Music'\npattern = re.compile(r'http://www.xiami.com/song/')\n\n# =========================================================================\n\ndef open_page(url_new):\n values = {}\n user_agent ='Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0'\n headers = {\n 'User-Agent': user_agent,\n }\n data = urllib.urlencode(values)\n req = urllib2.Request(url_new, data, headers)\n response = urllib2.urlopen(req, timeout=5)\n html = response.read()\n # print html\n return html\ndef xiami_search(url):\n result = []\n soup = BeautifulSoup(open_page(url))\n #print soup.prettify()\n for link in soup.find_all('a'):\n if pattern.match(link.get('href')):\n if link.get('href') not in result:\n result.append(link.get('href'))\n # print link.get('href')\n # print result[1]\n return result[1]\ndef lyrics_find(url):\n soup = BeautifulSoup(open_page(url))\n # print(str(soup.find_all(class_=\"lrc_main\")))\n result = str(soup.find_all(class_=\"lrc_main\")).replace(\"[
\", \"\")\n if result != '[]':\n result = result.replace(\"\t\t\t\t
]\", \"\")\n result = result.replace(\"\t\t\t\t\", \"\")\n result = result.replace(\"
\", \"\")\n else:\n result = \"\"\n return result\ndef url_builder(word):\n return 'http://www.xiami.com/search?key=' + urllib.quote(word, '')\ndef xiami_music_spider(word):\n url = url_builder(word)\n url = xiami_search(url)\n return lyrics_find(url)\ndef song_database_update(path, filename):\n txt = open(filename + '.txt', 'wb')\n for songs in glob.glob(path + '\\*\\*\\*.mp3'):\n result = songs.decode('gbk').encode('utf-8') + '\\n'\n txt.write(result)\ndef song_name_only_generate():\n file_out = open('Song_Index.txt', 'wb')\n file = open('Song_Index_with_path.txt', 'r')\n for line in file.readlines():\n result = line.replace('\\\\', '', 1)\n file_out.write(result[result.rfind('\\\\') + 6:])\n file_out.close()\n file.close()\ndef get_song_info(path):\n song = eyed3.load(path)\n tag = song.tag\n artist = tag.artist\n title = tag.title\n return artist + ' ' + title\n\n# =========================================================================\n","sub_path":"Xiami_Music_Spider.py","file_name":"Xiami_Music_Spider.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"584562561","text":"import re\n\nimport spotipy\nimport yaml\n\n\ndef get_spotipy_object(token):\n sp = spotipy.Spotify(auth=token)\n try:\n sp.search('M')\n except spotipy.client.SpotifyException as e:\n print(f\"Credentials error!\", e)\n exit(-1)\n return sp\n\n\ndef create_playlist_from_track_ids(sp, user_id, playlist_name, playlist_description, is_public, track_ids):\n playlist_result = sp.user_playlist_create(user_id,\n playlist_name,\n public=is_public,\n description=playlist_description)\n playlist_id = playlist_result['id']\n sp.user_playlist_add_tracks(user_id, playlist_id, track_ids)\n playlist_url = playlist_result['external_urls']['spotify']\n return playlist_url\n\n\ndef get_song(sp, name, artist, album):\n try:\n result = sp.search(f\"{artist} {name}\")\n if not result['tracks']['items']:\n name = re.sub(r\"[\\(\\[].*?[\\)\\]]\", \"\", name).strip()\n result = sp.search(f\"{artist} {name}\")\n if not result['tracks']['items']:\n artist = artist.split('Vs.')[0].strip()\n result = sp.search(f\"{artist} {name}\")\n if not result['tracks']['items']:\n artist = artist.split('And')[0].strip()\n result = sp.search(f\"{artist} {name}\")\n for i in result['tracks']['items']:\n if any([i['artists'][0]['name'] == a for a in artist.split('&')]) and (i['name'] == name):\n return i\n else:\n result['tracks']['items'].sort(key=lambda x: x['popularity'])\n return result['tracks']['items'][-1]\n except:\n return None\n\n\ndef read_yaml(yaml_path):\n with open(yaml_path, 'r') as stream:\n try:\n config = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n raise RuntimeError(\"YAML error\")\n return config\n\n\ndef remove_duplicates_keep_order(lst):\n lst_no_dup = []\n [lst_no_dup.append(t) for t in lst if t not in lst_no_dup]\n return lst_no_dup\n\n\ndef track_ids_from_infos(track_infos):\n return [u['id'] for u in track_infos]\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"504152772","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n# Load the data\ndef loadData():\n with np.load(\"notMNIST.npz\") as data:\n Data, Target = data[\"images\"], data[\"labels\"]\n 
np.random.seed(521)\n randIndx = np.arange(len(Data))\n np.random.shuffle(randIndx)\n Data = Data[randIndx] / 255.0\n Target = Target[randIndx]\n trainData, trainTarget = Data[:10000], Target[:10000]\n validData, validTarget = Data[10000:16000], Target[10000:16000]\n testData, testTarget = Data[16000:], Target[16000:]\n return trainData, validData, testData, trainTarget, validTarget, testTarget\n\n# Implementation of a neural network using only Numpy - trained using gradient descent with momentum\ndef convertOneHot(trainTarget, validTarget, testTarget):\n newtrain = np.zeros((trainTarget.shape[0], 10))\n newvalid = np.zeros((validTarget.shape[0], 10))\n newtest = np.zeros((testTarget.shape[0], 10))\n\n for item in range(0, trainTarget.shape[0]):\n newtrain[item][trainTarget[item]] = 1\n for item in range(0, validTarget.shape[0]):\n newvalid[item][validTarget[item]] = 1\n for item in range(0, testTarget.shape[0]):\n newtest[item][testTarget[item]] = 1\n return newtrain, newvalid, newtest\n\n\ndef create_batch(data, batch_size):\n for i in range(0, len(data), batch_size):\n batch = data[i:i+batch_size]\n yield batch\n\n\n# should have 6 tf.get variable statements - one for the filter, one for the first bias to be added after applying the filter in step 2, \n# weight and bias matrices between steps 5 and 7, and weight and bias matrices for the final two steps.\n# shape defines the neural network (filter size x filter size, number of channels in input image, number of convolution filters)\nweights = {\n # 3 x 3 conv with 1 input and 32 outputs\n 'w1': tf.get_variable('W0', shape=(3,3,1,32), initializer=tf.contrib.layers.xavier_initializer()),\n 'w2': tf.get_variable('W1', shape=(14*14*32,784), initializer=tf.contrib.layers.xavier_initializer()), \n 'out': tf.get_variable('W2', shape=(784,10), initializer=tf.contrib.layers.xavier_initializer()), \n}\n\n# shape defines the number of bias parameters\nbiases = {\n 'b1': tf.get_variable('B0', shape=(32), initializer=tf.contrib.layers.xavier_initializer()),\n 'b2': tf.get_variable('B1', shape=(784), initializer=tf.contrib.layers.xavier_initializer()),\n 'out': tf.get_variable('B2', shape=(10), initializer=tf.contrib.layers.xavier_initializer()),\n}\n\n# neural network architecture\n# 1. Input Layer\n# 2. A 3 × 3 convolutional layer, with 32 filters, using vertical and horizontal strides of 1. \n# 3. ReLU activation\n# 4. A batch normalization layer\n# 5. A 2 × 2 max pooling layer\n\n# 6. Flatten layer\n# 7. Fully connected layer (with 784 output units, i.e. corresponding to each pixel)\n# 8. ReLU activation\n\n# 9. Fully connected layer (with 10 output units, i.e. corresponding to each class)\n# 10. Softmax output\n# 11. 
Cross Entropy loss\n\n# test with lambda [0.01, 0.1, 0.5]\nlamb = 0\n# test with probability of [0.9, 0.75, 0.5]\nprob = 1\nlearningRate = 0.0001\n\ndef conv_net(): \n\n #TF graph input\n #variable that we will assign data to later\n Xp = tf.placeholder(tf.float32, [None, 784])\n Y = tf.placeholder(tf.float32, [None, 10])\n\n # reshape x for calculations\n x = tf.reshape(Xp, shape=[-1, 28, 28, 1])\n\n ######## build convolutional network ########\n\n # step 1 and 2: A 3 × 3 convolutional layer, with 32 filters, using vertical and horizontal strides of 1\n medx = tf.nn.conv2d(x, filter=weights['w1'], strides=[1, 1, 1, 1], padding='SAME')\n medx = tf.nn.bias_add(medx, biases['b1'])\n\n # conv1 is a tensor, which is the output of the layer\n conv1 = tf.nn.relu(medx) \n \n # calculate the mean and variance for normalization\n meanx, variancex = tf.nn.moments(conv1, keep_dims=True, axes=[0,1,2])\n # A batch normalization layer\n x_norm = tf.nn.batch_normalization(x = conv1, mean = meanx, variance = variancex, offset = None, scale = None, variance_epsilon = 1e-6)\n\n # A 2 × 2 max pooling layer\n pooled = tf.nn.max_pool(x_norm, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n # flatten and fully connected\n fc1 = tf.reshape(pooled, [-1, weights['w2'].get_shape().as_list()[0]])\n fc1 = tf.add(tf.matmul(fc1, weights['w2']), biases['b2'])\n\n # dropout layer\n fc1 = tf.nn.dropout(fc1, prob)\n\n fc1 = tf.nn.relu(fc1)\n\n # another fully connected layer for 10 output units\n out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])\n\n # softmax output\n pred_y = tf.nn.softmax(out)\n\n # calculate the CE loss\n ce = tf.losses.softmax_cross_entropy(Y, pred_y)\n # add regularization\n regularization = lamb * (tf.nn.l2_loss(weights['w2']) + tf.nn.l2_loss(weights['out'])) \n cost = ce + regularization\n\n # calculate the accuracy by getting the index with the max probability\n # and then comparing it with the actual label\n equal = tf.equal(tf.argmax(pred_y, 1), tf.argmax(Y, 1))\n accuracy = tf.reduce_mean(tf.cast(equal, tf.float32))\n\n # adam optimizer\n optimizer = tf.train.AdamOptimizer(learning_rate = learningRate)\n train_optimizer = optimizer.minimize(cost)\n\n return Xp, Y, train_optimizer, cost, accuracy\n\nepochs = 50\nlossTrain = np.zeros(epochs)\naccuracyTrain = np.zeros(epochs)\nlossValid = np.zeros(epochs)\naccuracyValid = np.zeros(epochs)\nlossTest = np.zeros(epochs)\naccuracyTest = np.zeros(epochs)\n\n\n\ndef trainModel(x, y, epochs, xvalid, yvalid, xtest, ytest, batch_size):\n\n # reshape the data matrix\n m = x.shape[1] * x.shape[2]\n N = x.shape[0]\n\n # x is shaped into N x 784\n X = x.reshape(N,m)\n\n # build the graph\n Xp, Y, train_optimizer, cost, accuracy = conv_net()\n\n # init all the variables\n init = tf.global_variables_initializer() \n\n with tf.Session() as sess:\n\n sess.run(init) \n\n # iterate through the loop epochs times\n for i in range(0, epochs):\n\n # make the x and y into batches, and loop though the entire sample batch_size at a time\n for (x_batch, y_batch) in zip(create_batch(X, batch_size), create_batch(y, batch_size)):\n # run the adam optimized gradient descent algo\n sess.run(train_optimizer, feed_dict={Xp: x_batch, Y: y_batch})\n\n # store the losses every epoch\n lossTrain[i], accuracyTrain[i] = sess.run([cost, accuracy], feed_dict={Xp: X, Y: y})\n print(\"epoch number: \" + str(i+1))\n print(accuracyTrain[i])\n lossValid[i], accuracyValid[i] = sess.run([cost, accuracy], feed_dict={Xp: xvalid.reshape(xvalid.shape[0],m), Y: yvalid})\n 
lossTest[i], accuracyTest[i] = sess.run([cost, accuracy], feed_dict={Xp: xtest.reshape(xtest.shape[0],m), Y: ytest})\n\n # shuffle the data\n indicies = np.arange(N)\n np.random.shuffle(indicies)\n X = X[indicies]\n y = y[indicies]\n\n\n # plot the graphs\n plt.plot(lossTrain, label = 'Train', linewidth = \"2\")\n plt.plot(lossValid, label = 'Valid', linewidth = \"2\")\n plt.plot(lossTest, label = 'Test', linewidth = \"2\")\n axes = plt.gca()\n plt.legend()\n plt.title(\"batch size = \" + str(batch_size) + \", lambda = \" + str(lamb) + \", keep prob = \" + str(prob))\n plt.xlabel(\"epochs\")\n plt.ylabel(\"loss\")\n plt.show()\n print(\"final train loss \" + str(lossTrain[epochs-1]))\n print(\"final valid loss \" + str(lossValid[epochs-1]))\n print(\"final test loss \" + str(lossTest[epochs-1]))\n\n\n\n plt.plot(accuracyTrain, label = 'Train', linewidth = \"2\")\n plt.plot(accuracyValid, label = 'Valid', linewidth = \"2\")\n plt.plot(accuracyTest, label = 'Test', linewidth = \"2\")\n axes = plt.gca()\n plt.legend(loc = \"lower right\")\n plt.title(\"batch size = \" + str(batch_size) + \", lambda = \" + str(lamb) + \", keep prob = \" + str(prob))\n plt.xlabel(\"epochs\")\n plt.ylabel(\"accuracy\")\n plt.show()\n print(\"final train accuracy \" + str(accuracyTrain[epochs-1]))\n print(\"final valid accuracy \" + str(accuracyValid[epochs-1]))\n print(\"final test accuracy \" + str(accuracyTest[epochs-1]))\n\n\n return\n\n\ndef run():\n\n # get the dataset\n trainData, validData, testData, ltrainLabel, lvalidLabel, ltestLabel = loadData()\n\n # convert to one hot encoding for labels only, data remains the same\n # dimensions go from N to N x 10 -> 10 different kinds of classes\n trainLabel, validLabel, testLabel = convertOneHot(ltrainLabel, lvalidLabel, ltestLabel)\n\n batch_size = 32\n\n trainModel(trainData, trainLabel, epochs, validData, validLabel, testData, testLabel, batch_size)\n\n return\n\nrun()\n\n\n","sub_path":"Assignment 2/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":8822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"269420100","text":"# import datetime\nfrom decimal import Decimal\n\nfrom django.core.management.base import BaseCommand\n\nfrom leasing.enums import InvoiceState, InvoiceType\nfrom leasing.models import Lease # , PayableRent, ReceivableType\n\nknown_errors = {\n ('A1154-878', Decimal('19364.80')): 'known error in the old system',\n ('A1154-878', Decimal('11830.54')): 'known error in the old system',\n}\n\n\nclass Command(BaseCommand):\n help = 'Import data from the old MVJ'\n\n def handle(self, *args, **options):\n leases = Lease.objects.all().order_by('start_date')\n # leases = Lease.objects.filter(id__in=[11]).order_by('start_date')\n\n for lease in leases:\n # from_year = lease.start_date.year\n # to_year = datetime.date.today().year\n # year_range = range(from_year, to_year)\n\n self.stdout.write('Lease #{} {} '.format(lease.id, lease))\n\n rent = lease.rents.first()\n\n # Year rent...\n # for year in year_range:\n # year_start = datetime.date(year=year, month=1, day=1)\n # year_end = datetime.date(year=year, month=12, day=31)\n #\n # payable_rents = PayableRent.objects.filter(rent=rent, end_date__year=year)\n # for payable_rent in payable_rents:\n # # calculated_amount = round(rent.get_amount_for_date_range(\n # # payable_rent.start_date, payable_rent.end_date), 2)\n # calculated_amount = round(rent.get_amount_for_date_range(\n # year_start, year_end), 2)\n #\n # 
self.stdout.write(' Payable rent amount: {}'.format(payable_rent.amount))\n # self.stdout.write(' Calculated amount: {} {}'.format(\n # calculated_amount,\n # ' matches' if payable_rent.amount == calculated_amount else ' MISMATCH'\n # ))\n\n for invoice in lease.invoices.filter(type=InvoiceType.CHARGE, state=InvoiceState.PAID).order_by(\n 'billing_period_start_date'):\n calculated_amount = round(\n rent.get_amount_for_date_range(invoice.billing_period_start_date, invoice.billing_period_end_date),\n 2)\n extra_texts = []\n if invoice.total_amount != calculated_amount and \\\n round(invoice.total_amount) == round(calculated_amount):\n extra_texts.append('but close enough')\n\n if (str(lease.identifier), invoice.total_amount) in known_errors:\n extra_texts.append(known_errors[(str(lease.identifier), invoice.total_amount)])\n\n self.stdout.write(' Invoice #{} {} - {} amount: {} calculated amount: {} {} {}'.format(\n invoice.id,\n invoice.billing_period_start_date,\n invoice.billing_period_end_date,\n invoice.total_amount,\n calculated_amount,\n 'MISMATCH' if invoice.total_amount != calculated_amount else '',\n ' '.join(extra_texts),\n ))\n\n # for rent in lease.rents.all():\n # self.stdout.write(' Rent #{} {}'.format(rent.id, rent.type))\n #\n # for year in year_range:\n # self.stdout.write(' Year {}'.format(year))\n # year_start = datetime.date(year=year, month=1, day=1)\n # year_end = datetime.date(year=year, month=12, day=31)\n #\n # self.stdout.write(' Period {} - {}'.format(year_start, year_end))\n #\n # try:\n # payable_rents = PayableRent.objects.filter(rent=rent, end_date__year=year)\n # for payable_rent in payable_rents:\n # calculated_amount = round(rent.get_amount_for_date_range(\n # payable_rent.start_date, payable_rent.end_date), 2)\n #\n # self.stdout.write(' Payable rent amount: {}'.format(payable_rent.amount))\n # self.stdout.write(' Calculated amount: {} {}'.format(\n # calculated_amount,\n # ' matches' if payable_rent.amount == calculated_amount else ' MISMATCH'\n # ))\n # except PayableRent.DoesNotExist:\n # self.stdout.write(' No PayableRent for this period')\n","sub_path":"leasing/management/commands/compare_rent_amounts.py","file_name":"compare_rent_amounts.py","file_ext":"py","file_size_in_byte":4599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"140691222","text":"\"\"\"\r\nAgentpy Model Module\r\nContent: Main class for agent-based models\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom datetime import datetime\r\n\r\nfrom .output import DataDict\r\nfrom .objects import ApEnv, Agent, Environment\r\nfrom .network import Network\r\nfrom .grid import Grid\r\nfrom .space import Space\r\nfrom .tools import AttrDict, AgentpyError, make_list\r\nfrom .lists import ObjList, EnvList\r\n\r\n\r\nclass Model(ApEnv):\r\n \"\"\"\r\n An agent-based model that can hold environments and agents.\r\n\r\n This class can be used as a parent class for custom models.\r\n Class attributes can be accessed like dictionary items.\r\n To define the simulation procedure, you can override the methods\r\n :func:`Model.setup`, :func:`Model.step`,\r\n :func:`Model.update`, and :func:`Model.end`.\r\n To perform the simulation, use :func:`Model.run`.\r\n\r\n Attributes:\r\n name (str): The models' name.\r\n envs (EnvList): The models' environments.\r\n agents (AgentList): The models' agents.\r\n objects (ObjList): The models' agents and environments.\r\n random (numpy.random.Generator): The models' random number generator.\r\n p (AttrDict): The models' parameters.\r\n t (int): Current time-step of the model.\r\n log (dict): The models' recorded variables.\r\n measures (dict): The models' recorded measures.\r\n var_keys (list): Names of the model's variables.\r\n output (DataDict):\r\n Output data that is generated at the end of a simulation.\r\n\r\n Arguments:\r\n parameters (dict, optional): Dictionary of model parameters.\r\n Recommended types for parameters are int, float, str, list,\r\n numpy.integer, numpy.floating, and numpy.ndarray.\r\n run_id (int, optional): Number of current run (default None).\r\n scenario (str, optional): Current scenario (default None).\r\n **kwargs: Will be forwarded to :func:`Model.setup`.\r\n \"\"\"\r\n\r\n def __init__(self, parameters=None, run_id=None, scenario=None, **kwargs):\r\n\r\n self._id_counter = -1\r\n self._obj_dict = {} # Objects mapped by their id\r\n super().__init__(self) # Model will assign itself id 0\r\n\r\n self.t = 0\r\n self.run_id = run_id\r\n self.scenario = scenario\r\n\r\n # Recording\r\n self._measure_log = {}\r\n self.output = DataDict()\r\n self.output.log = {'model_type': self.type,\r\n 'time_stamp': str(datetime.now())}\r\n\r\n # Private variables\r\n self._envs = EnvList()\r\n self._random = np.random.default_rng()\r\n self._steps = None\r\n self._parameters = AttrDict(parameters)\r\n self._stop = False\r\n self._set_var_ignore()\r\n self._setup_kwargs = kwargs\r\n\r\n def __repr__(self):\r\n rep = f\"Agent-based model {{\"\r\n keys = ['type', 'agents', 'envs', 'p']\r\n items = [(k, self[k]) for k in keys]\r\n items += list(self.__dict__.items())\r\n for k, v in items:\r\n if k[0] != '_':\r\n v = v._short_repr() if '_short_repr' in dir(v) else v\r\n rep += f\"\\n'{k}': {v}\"\r\n return rep + '\\n}'\r\n\r\n # Properties ------------------------------------------------------------ #\r\n\r\n @property\r\n def objects(self):\r\n return ObjList(self.agents + self.envs)\r\n\r\n @property\r\n def env(self):\r\n if len(self._envs) == 1:\r\n return self._envs[0]\r\n elif len(self._envs) == 0:\r\n raise AgentpyError(f\"{self} has no environment.\")\r\n else:\r\n raise AgentpyError(f\"{self} has more than one environment. Please \"\r\n \"use `Agent.envs` instead of `Agent.env`.\")\r\n\r\n @property\r\n def envs(self):\r\n return self._envs\r\n\r\n @property\r\n def random(self):\r\n return self._random\r\n\r\n @property\r\n def measures(self):\r\n return self._measure_log\r\n\r\n # Handling object ids --------------------------------------------------- #\r\n\r\n def get_obj(self, obj_id):\r\n \"\"\" Returns model object with passed object id (int). \"\"\"\r\n try:\r\n return self._obj_dict[obj_id]\r\n except KeyError:\r\n raise ValueError(f\"Model has no object with obj_id '{obj_id}'.\")\r\n\r\n def _new_id(self):\r\n \"\"\" Returns a new unique object id (int). \"\"\"\r\n self._id_counter += 1\r\n return self._id_counter\r\n\r\n # Adding and removing objects ------------------------------------------- #\r\n\r\n def remove_agents(self, agents):\r\n \"\"\" Removes agents from the model, including all environments.\r\n If used during a loop over an :class:`AgentList`,\r\n consider using `AgentList.call` with the argument `check_alive=True`\r\n to avoid calling agents after they have been deleted. 
\"\"\"\r\n for agent in list(make_list(agents)): # Soft copy as list is changed\r\n if agent.alive:\r\n self._agents.remove(agent)\r\n for env in agent.envs:\r\n env._agents.remove(agent)\r\n agent._envs = EnvList()\r\n agent._alive = False\r\n\r\n def add_env(self, env_class=Environment, **kwargs):\r\n \"\"\" Adds a new environment to the model.\r\n\r\n Arguments:\r\n env_class (type, optional):\r\n The environment class that should be used.\r\n If none is passed, :class:`Environment` is used.\r\n **kwargs: Forwarded to the new environment.\r\n\r\n Returns:\r\n Environment: The new environment.\r\n \"\"\"\r\n new_env = env_class(self.model, **kwargs)\r\n self.envs.append(new_env)\r\n return new_env\r\n\r\n def add_network(self, graph=None, agents=None, **kwargs):\r\n \"\"\" Adds a new :class:`Network` environment to the model.\r\n Arguments are forwarded to the new environment. \"\"\"\r\n new_env = Network(self.model, graph=graph, agents=agents, **kwargs)\r\n self.envs.append(new_env)\r\n return new_env\r\n\r\n def add_grid(self, shape, **kwargs):\r\n \"\"\" Adds a new :class:`Grid` environment to the model.\r\n Arguments are forwarded to the new environment. \"\"\"\r\n new_env = Grid(self.model, shape=shape, **kwargs)\r\n self.envs.append(new_env)\r\n return new_env\r\n\r\n def add_space(self, shape, **kwargs):\r\n \"\"\" Adds a new :class:`Space` environment to the model.\r\n Arguments are forwarded to the new environment. \"\"\"\r\n new_env = Space(self.model, shape=shape, **kwargs)\r\n self.envs.append(new_env)\r\n return new_env\r\n\r\n # Recording ------------------------------------------------------------- #\r\n\r\n def measure(self, name, value):\r\n \"\"\" Store a new evaluation measure.\r\n\r\n Evaluation measures are meant to be 'summary statistics' or 'reporters'\r\n of the whole simulation, and only one value can be stored per run.\r\n In comparison, variables that are recorded with :func:`Model.record`\r\n can be recorded multiple times for each time-step and object.\r\n\r\n Arguments:\r\n name (str): Name of the measure.\r\n value (int or float): Measured value.\r\n\r\n Examples:\r\n\r\n Store a measure `x` with a value `42`::\r\n\r\n model.measure('x', 42)\r\n\r\n Define a custom model that stores an evaluation measure `sum_id`\r\n with the sum of all agent ids at the end of the simulation::\r\n\r\n class MyModel(ap.Model):\r\n def setup(self):\r\n agents = self.add_agents(self.p.agents)\r\n def end(self):\r\n self.measure('sum_id', sum(self.agents.id))\r\n\r\n Running an experiment over different numbers of agents for this\r\n model yields the following datadict of measures::\r\n\r\n >>> sample = ap.sample({'agents': (1, 3)}, 3)\r\n >>> exp = ap.Experiment(MyModel, sample)\r\n >>> results = exp.run()\r\n >>> print(results.measures)\r\n sum_id\r\n run_id\r\n 0 1\r\n 1 3\r\n 2 6\r\n \"\"\"\r\n self._measure_log[name] = [value]\r\n\r\n # Placeholder methods for custom simulation methods --------------------- #\r\n\r\n def setup(self, **kwargs):\r\n \"\"\" Defines the model's actions before the first simulation step.\r\n Can be overwritten and used to initiate agents and environments.\"\"\"\r\n pass\r\n\r\n def step(self):\r\n \"\"\" Defines the model's actions during each simulation step.\r\n Can be overwritten and used to set the models' main dynamics.\"\"\"\r\n pass\r\n\r\n def update(self):\r\n \"\"\" Defines the model's actions after setup and each simulation step.\r\n Can be overwritten and used for the recording of dynamic variables. 
\"\"\"\r\n pass\r\n\r\n def end(self):\r\n \"\"\" Defines the model's actions after the last simulation step.\r\n Can be overwritten and used for final calculations and measures.\"\"\"\r\n pass\r\n\r\n # Simulation routines (in line with ipysimulate) ------------------------ #\r\n\r\n def set_parameters(self, parameters):\r\n \"\"\" Adds or updates passed parameters. \"\"\"\r\n self._parameters.update(parameters)\r\n\r\n def run_setup(self, steps=None, seed=None):\r\n \"\"\" Sets up time-step 0 of the simulation.\r\n Prepares steps and a random number generator,\r\n and then calls :func:`Model.setup` and :func:`Model.update`. \"\"\"\r\n\r\n # Prepare random generator\r\n if not seed and 'seed' in self.p:\r\n seed = self.p['seed'] # Take seed from parameters\r\n if seed:\r\n self._random = np.random.default_rng(seed=seed)\r\n\r\n # Prepare steps\r\n if steps is None:\r\n self._steps = self.p['steps'] if 'steps' in self.p else 1000\r\n else:\r\n self._steps = steps\r\n\r\n # Initiate simulation\r\n self._stop = False\r\n\r\n # Execute setup and first update\r\n self.setup(**self._setup_kwargs)\r\n self.update()\r\n\r\n # Stop simulation if t too high\r\n if self.t >= self._steps:\r\n self._stop = True\r\n\r\n def run_step(self):\r\n \"\"\" Proceeds the simulation by one step, incrementing `Model.t` by 1\r\n and then calling :func:`Model.step` and :func:`Model.update`.\"\"\"\r\n self.t += 1\r\n self.step()\r\n self.update()\r\n if self.t >= self._steps:\r\n self._stop = True\r\n\r\n def stop(self):\r\n \"\"\" Stops :meth:`Model.run` during an active simulation. \"\"\"\r\n self._stop = True\r\n\r\n @property\r\n def is_running(self):\r\n \"\"\" Indicates whether the model is currently running (bool). \"\"\"\r\n return not self._stop\r\n\r\n def reset(self):\r\n \"\"\" Reset model to initial conditions and call setup. \"\"\"\r\n self.__init__(parameters=self.p,\r\n run_id=self.run_id,\r\n scenario=self.scenario,\r\n **self._setup_kwargs)\r\n\r\n # Data management ------------------------------------------------------- #\r\n\r\n def create_output(self):\r\n \"\"\" Generates a :class:`DataDict` with dataframes of all recorded\r\n variables and measures, which will be stored in :obj:`Model.output`.\r\n \"\"\"\r\n\r\n def output_from_obj_list(self, obj_list, columns):\r\n # Aggregate logs per object type\r\n obj_types = {}\r\n for obj in obj_list:\r\n\r\n if obj.log: # Check for variables\r\n\r\n # Add object id/key to object log\r\n obj.log['obj_id'] = [obj.id] * len(obj.log['t'])\r\n\r\n # Initiate object type if new\r\n obj_type = type(obj).__name__\r\n\r\n if obj_type not in obj_types.keys():\r\n obj_types[obj_type] = {}\r\n\r\n # Add object log to aggr. 
log\r\n for k, v in obj.log.items():\r\n if k not in obj_types[obj_type]:\r\n obj_types[obj.type][k] = []\r\n obj_types[obj_type][k].extend(v)\r\n\r\n # Transform logs into dataframes\r\n for obj_type, log in obj_types.items():\r\n df = pd.DataFrame(log)\r\n for k, v in columns.items():\r\n df[k] = v # Set additional index columns\r\n df = df.set_index(list(columns.keys()) + ['obj_id', 't'])\r\n self.output['variables'][obj_type] = df\r\n\r\n # 0 - Document parameters\r\n if self.p:\r\n self.output['parameters'] = self.p\r\n\r\n # 1 - Define additional index columns\r\n columns = {}\r\n if self.run_id is not None:\r\n columns['run_id'] = self.run_id\r\n if self.scenario is not None:\r\n columns['scenario'] = self.scenario\r\n\r\n # 2 - Create measure output\r\n if self._measure_log:\r\n d = self._measure_log\r\n for key, value in columns.items():\r\n d[key] = value\r\n df = pd.DataFrame(d)\r\n if columns:\r\n df = df.set_index(list(columns.keys()))\r\n self.output['measures'] = df\r\n\r\n # 3 - Create variable output\r\n self.output['variables'] = DataDict()\r\n\r\n # 3.1 - Create variable output for objects\r\n output_from_obj_list(self, self.agents, columns)\r\n output_from_obj_list(self, self.envs, columns)\r\n\r\n # 3.2 - Create variable output for model\r\n if self.log:\r\n df = pd.DataFrame(self.log)\r\n # df['obj_id'] = 'model'\r\n for k, v in columns.items():\r\n df[k] = v\r\n df = df.set_index(list(columns.keys()) + ['t']) # 'obj_id',\r\n\r\n if self.output['variables']:\r\n self.output['variables'][self.type] = df\r\n else:\r\n self.output['variables'] = df # No subdict if only model vars\r\n\r\n # 3.3 - Remove variable dict if empty (i.e. nothing has been added)\r\n elif not self.output['variables']:\r\n del self.output['variables']\r\n\r\n # Main simulation method for direct use --------------------------------- #\r\n\r\n def run(self, steps=None, seed=None, display=True):\r\n \"\"\" Executes the simulation of the model.\r\n\r\n It starts by calling :func:`Model.run_setup` and then calls\r\n :func:`Model.run_step` until the method :func:`Model.stop` is called\r\n or `steps` is reached. 
After that, :func:`Model.end` and\r\n :func:`Model.create_output` are called.\r\n\r\n Arguments:\r\n steps (int, optional):\r\n Maximum number of steps for the simulation to run.\r\n If none is given, the parameter 'Model.p.steps' will be used.\r\n If there is no such parameter, 'steps' will be set to 1000.\r\n seed (int, optional):\r\n Seed to set for :obj:`Model.random`\r\n at the beginning of the simulation.\r\n If none is given, the parameter 'Model.p.seed' will be used.\r\n If there is no such parameter, as random seed will be set.\r\n display (bool, optional):\r\n Whether to display simulation progress (default True).\r\n\r\n Returns:\r\n DataDict: Recorded model data,\r\n which can also be found in :attr:`Model.output`.\r\n \"\"\"\r\n\r\n dt0 = datetime.now() # Time-Stamp\r\n self.run_setup(steps, seed)\r\n while not self._stop:\r\n self.run_step()\r\n if display:\r\n print(f\"\\rCompleted: {self.t} steps\", end='')\r\n self.end()\r\n self.create_output()\r\n self.output.log['run_time'] = ct = str(datetime.now() - dt0)\r\n self.output.log['steps'] = self.t\r\n\r\n if display:\r\n print(f\"\\nRun time: {ct}\\nSimulation finished\")\r\n\r\n return self.output\r\n\r\n","sub_path":"agentpy/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":15997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"89223471","text":"#!/usr/bin/python3\n\n\"\"\"\nY. Abu-Mostafa, Learning From Data\nProblem 2.24 (c)\nCompute bias, variance and expected out-of-sample error\n\"\"\"\n\nimport numpy as np\nimport matplotlib as mpl\nfont = { 'size' : 20 }\nmpl.rc( 'xtick' , labelsize = 20 )\nmpl.rc( 'ytick' , labelsize = 20 )\nmpl.rc( 'font' , **font )\nimport matplotlib.pyplot as plt\nplt.rc('text' , usetex = True )\n\n__author__ = \"Jinghua Feng\"\n__copyright__ = \"Copyright 2018, The Machine Learning Project\"\n__credits__ = [\"Jinghua Feng\"]\n__license__ = \"GPL\"\n__version__ = \"0.1.0\"\n__maintainer__ = \"Jinghua Feng\"\n__email__ = \"tracygooo@gmail.com\"\n__status__ = \"Dev\"\n\ndef randomData( N ) :\n return np.random.uniform( -1 , 1 , ( N , 2 ) )\n\ndef computeGdata( D ) :\n l = D.shape[ 0 ]\n g = np.zeros( ( l , 2 ) )\n g_ave = np.zeros( 2 )\n for i in np.arange( l ) :\n g[i] = D[i,0] + D[i,1] , -D[i,0] * D[i,1]\n g_ave += g[ i ]\n g_ave /= l\n return ( g , g_ave )\n\ndef gBar( x , g_ave ) :\n return g_ave[ 0 ] * x + g_ave[ 1 ]\n\ndef computeVarX( g , g_ave ) :\n l = g.shape[ 0 ]\n a2 , a1 , a0 = 0 , 0 , 0\n for i in np.arange( l ) :\n slp_diff = g[ i , 0] - g_ave[ 0 ]\n int_diff = g[ i , 1] - g_ave[ 1 ]\n a2 += pow( slp_diff , 2 )\n a1 += 2 * slp_diff * int_diff\n a0 += pow( int_diff , 2 )\n a2 /= N\n a1 /= N\n a0 /= N\n return ( a2 , a1 , a0 )\n\ndef varX( x , a2 , a1 , a0 ) :\n return a2 * pow( x , 2 ) + a1 * x + a0\n\ndef computeVar( a2 , a1 , a0 ) :\n dx = 0.01\n x = np.arange( -1 , 1 , dx )\n var = 0.0\n for i in np.arange( len( x ) ) :\n var += varX( x[ i ] , a2 , a1 , a0 )\n var *= dx / 2.0\n return var\n\ndef computeBias( g_ave ) :\n dx = 0.01\n x = np.arange( -1 , 1 , dx )\n bias = 0.0\n for i in np.arange( len( x ) ) :\n bias += pow( gBar( x[ i ] , g_ave) - pow( x[ i ] , 2 ) , 2.0 )\n bias *= dx / 2.0\n return bias\n\n# [ g^D(x) - f(x) ]^2\ndef gfDiff( slope , itcpt , x ) :\n return pow( slope * x + itcpt - pow( x , 2 ) , 2 )\n\n# Integration of [ g^D(x) - f(x) ]^2\ndef intGfDiff( slope , incpt ) :\n dx = 0.01\n x = np.arange( -1 , 1 , dx )\n itgd = 0\n for i in np.arange( len( x ) ) :\n itgd += gfDiff( slope , incpt 
, x[ i ] )\n itgd *= dx / 2.0\n return itgd\n\ndef computeOutSampleError( g ) :\n l = g.shape[ 0 ]\n E_out = 0\n for i in np.arange( l ) :\n E_out += intGfDiff( g[i][0] , g[i][1] )\n E_out /= l\n return E_out\n\ndef plotGF( slope , itcpt ) :\n x = np.arange( -1 , 1 , 0.01 )\n plt.plot( x , slope * x + itcpt , label = r'$\\bar{g}(x)$' )\n plt.plot( x , pow( x , 2 ) , label = '$f(x)$' )\n plt.legend( loc = 'upper center')\n plt.savefig( 'gbar_f.png' , format = 'png' )\n plt.show()\n\ndef plotVarX( a2 , a1 , a0 ) :\n x = np.arange( -1 , 1 , 0.01 )\n plt.plot( x , varX( x , a2 , a1 , a0 ) )\n #plt.show()\n\ndef plotBiasX( g_ave ) :\n x = np.arange( -1 , 1 , 0.01 )\n k , b = g_ave[ 0 ] , g_ave[ 1 ]\n plt.plot( x , pow( k*x + b - pow( x , 2 ), 2 ) )\n #plt.show()\n\n\nif __name__ == '__main__' :\n\n N = 1000000\n data = randomData( N )\n g , g_ave = computeGdata( data )\n a2 , a1 , a0 = computeVarX( g , g_ave )\n var = computeVar( a2 , a1 , a0 )\n bias = computeBias( g_ave )\n E_out = computeOutSampleError( g )\n\n #plotVarX( a2 , a1 , a0 )\n #plotBiasX( g_ave )\n plotGF( g_ave[ 0 ] , g_ave[ 1 ] )\n\n print( 'bias = ' , bias )\n print( 'var = ' , var )\n print( 'Expected out-of-sample error with respect to data sets: ' , E_out )\n print( 'Expected out-of-sample error with bias and var: ' , bias + var )\n","sub_path":"approximation-generalization-tradeoff-a5/code/p2_24.py","file_name":"p2_24.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"136255687","text":"import logging\nimport os\n\nfrom webob.dec import wsgify\nfrom webob.exc import HTTPUnauthorized\n\nfrom pyramid.settings import asbool\n\nfrom mozsvc.config import get_configurator\nfrom mozsvc.plugin import load_and_register\n\nfrom appsync.storage import StorageAuthError\n\n\nlogger = logging.getLogger('appsync')\n\n\ndef includeme(config):\n # adds cornice\n config.include(\"cornice\")\n\n # adds Mozilla default views\n config.include(\"mozsvc\")\n\n # adds local views\n config.scan(\"appsync.views\")\n\n # initializes the storage backend\n load_and_register(\"storage\", config)\n\n\nclass CatchAuthError(object):\n def __init__(self, app):\n self.app = app\n\n @wsgify\n def __call__(self, request):\n try:\n return request.get_response(self.app)\n except StorageAuthError:\n return HTTPUnauthorized()\n\n\ndef main(global_config, **settings):\n config = get_configurator(global_config, **settings)\n\n # Use autocommit if we're in testing mode.\n mock_browserid = asbool(os.path.expandvars(global_config.get('test', '')))\n if mock_browserid:\n config.autocommit = True\n\n # Get all the default config for appsync.\n config.include(includeme)\n\n # Add testing views if we're in testing mode.\n if mock_browserid:\n config.scan(\"appsync.tests.views\")\n config.registry['mock_browserid'] = True\n\n return CatchAuthError(config.make_wsgi_app())\n","sub_path":"appsync/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"653480894","text":"import sys\n\nOP_ADD = 1\nOP_MUL = 2\nOP_HALT = 99\nIP_STEP = 4\nNOUN = 1\nVERB = 2\nSYMBOLS = {OP_ADD: \"+\", OP_MUL: \"*\", OP_HALT: \"H\"}\n\n\ndef run(prog, noun=12, verb=2):\n ip = 0\n done = False\n prog[NOUN] = noun\n prog[VERB] = verb\n while not done:\n op = prog[ip]\n arg1 = prog[prog[ip + 1]]\n arg2 = prog[prog[ip + 2]]\n target_address = prog[ip + 3]\n if op == OP_ADD:\n 
prog[target_address] = arg1 + arg2\n elif op == OP_MUL:\n prog[target_address] = arg1 * arg2\n elif op == OP_HALT:\n done = True\n else:\n raise Exception\n ip += IP_STEP\n return prog\n\n\ndef find(mem, target):\n params = {run(mem[:], noun, verb)[0]: (noun, verb)\n for noun in range(100)\n for verb in range(100)}\n return params.get(target)\n\n\ndef main(f):\n with open(f) as prog:\n mem = list(map(lambda s: int(s), prog.readline().split(sep=\",\")))\n noun, verb = find(mem, 19690720)\n print(100 * noun + verb)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n main(sys.argv[1])\n else:\n print(\"python3 01_cpu.py input.txt\")\n","sub_path":"day02/02_cpu.py","file_name":"02_cpu.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"425907791","text":"import os\nimport cv2\nimport tkinter\nimport matplotlib.image as img\nimport numpy\nfrom tensorflow import keras\nfrom scipy import misc\nfrom PIL import Image,ImageDraw\n\nmain_path = \"C:\\\\Users\\\\Ayush Sharma\\\\Desktop\\\\Programs\\\\Face-Detection\"\ntraining_directory = main_path + \"\\\\training\"\nresized_training_directory = main_path + \"\\\\resized\"\nnames = []\nlabels = []\n\ntry:\n os.mkdir(training_directory)\n os.mkdir(resized_training_directory)\nexcept:\n pass\n\nfor name in os.listdir(training_directory):\n names.append(name)\n\ndef add_training_class(name:str):\n try:\n os.mkdir(training_directory + f\"\\\\{name}\")\n names.append(name)\n print(\"Done\")\n except:\n print(\"Already Made\")\n\ntraining_class_directories = os.listdir(training_directory)\n\nfor i in range(0,len(training_class_directories)):\n for k in os.listdir(training_directory + \"\\\\\" + training_class_directories[i]):\n ig = Image.open(training_directory + \"\\\\\" + training_class_directories[i] + \"\\\\\" + k)\n ig = ig.resize((100,100))\n ig.save(resized_training_directory + \"\\\\\" + f\"resized_{k}\")\n labels.append(i)\n\nprocessed_images = []\nfor i in os.listdir(resized_training_directory):\n n = img.imread(resized_training_directory + \"\\\\\" + i)\n processed_images.append(n)\n\ntraining_set = numpy.array(processed_images)\n\nmodel = keras.Sequential([\n keras.layers.Flatten(input_shape=(training_set.shape[1],training_set.shape[2],training_set.shape[3])),\n keras.layers.Dense(512,activation='relu'),\n keras.layers.Dense(512,activation='relu'),\n keras.layers.Dense(512,activation='relu'),\n keras.layers.Dense(512,activation='relu'),\n keras.layers.Dense(512,activation='relu'),\n keras.layers.Dense(512,activation='relu'),\n keras.layers.Dense(512,activation='relu'),\n keras.layers.Dense(512,activation='relu'),\n keras.layers.Dense(len(names))\n])\n\nfor i in range(0,len(labels)):\n labels[i] = float(labels[i])\n\nlabels = numpy.array(labels)\n\nmodel.compile(optimizer='adam',\n loss= keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\ncheckpoint_path = \"training_1/cp.ckpt\"\ncheckpoint_dir = os.path.dirname(checkpoint_path)\n\ncp_callback = keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,\n save_weights_only=True,\n verbose=1)\n\n#model.fit(training_set, labels, epochs=512,callbacks=cp_callback)\n\ndef Recognise(main_path : str):\n predictions_directory = main_path + \"\\\\\" + \"predictions\"\n resized_predictions_directory = main_path + \"\\\\\" + \"predictions_resized\"\n for i in os.listdir(predictions_directory):\n ig = Image.open(predictions_directory + \"\\\\\" + i)\n ig = 
ig.resize((100,100))\n ig.save(resized_predictions_directory + \"\\\\\" + f\"resized_{i}\")\n\n prediction_images = []\n for i in os.listdir(resized_predictions_directory):\n n = img.imread(resized_predictions_directory + \"\\\\\" + i)\n prediction_images.append(n)\n\n prediction_images = numpy.array(prediction_images)\n\n test_images_array = numpy.array(prediction_images)\n probability_model = keras.Sequential([model, keras.layers.Softmax()])\n predictions = probability_model.predict(test_images_array)\n\n for i in os.listdir(predictions_directory):\n os.remove(predictions_directory + \"//\" + i)\n for i in os.listdir(resized_predictions_directory):\n os.remove(resized_predictions_directory + \"//\" + i)\n \n return names[numpy.argmax(predictions)]\n","sub_path":"Main_Algorithm.py","file_name":"Main_Algorithm.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"245174604","text":"\"\"\"\n Impedance Analyzer \n Author: Lars Lindner\n Revision: 25/03/2021\n\n -> This programm does a linear frequency sweep from 'freq_start' to 'freq_end' with 'freq_delta' using a GUI interface\n -> It prints the numerical values of frequency [Hz], impedance [Ohm] and phase [deg] as a txt-file\n\"\"\"\n\n\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import *\n\nfrom ctypes import *\nfrom dwfconstants import *\n\nimport math\nimport time\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# region Init\n# Variables Definitons\nwin = Tk()\nhdwf = c_int()\nszerr = create_string_buffer(512)\nsts = c_byte()\nimpedance = c_double()\nphase = c_double()\n\n# Default values for parameters\nfreq_start = int(3.9e6)\nfreq_end = int(4.1e6)\nfreq_delta = int(10000)\nresistance = int(1000)\namplitude = float(1)\n\n# Load .dll\nif sys.platform.startswith(\"win\"):\n dwf = cdll.LoadLibrary(\"dwf.dll\")\nelif sys.platform.startswith(\"darwin\"):\n dwf = cdll.LoadLibrary(\"/Library/Frameworks/dwf.framework/dwf\")\nelse:\n dwf = cdll.LoadLibrary(\"libdwf.so\")\n# endregion\n\n\n# region Window Functions\ndef connectFunction():\n dwf.FDwfDeviceOpen(c_int(-1), byref(hdwf))\n\n if hdwf.value == hdwfNone.value:\n dwf.FDwfGetLastErrorMsg(szerr)\n infoOutput.insert(tk.INSERT, str(szerr.value) + \"\\n\")\n infoOutput.see(\"end\")\n else:\n connectButton[\"state\"] = tk.DISABLED\n disconnectButton[\"state\"] = tk.NORMAL\n startButton[\"state\"] = tk.NORMAL\n setButton[\"state\"] = tk.NORMAL\n infoOutput.insert(tk.INSERT, \"AD2 connected\\n\")\n infoOutput.see(\"end\")\n\n\ndef disconnectFunction():\n dwf.FDwfDeviceClose(hdwf)\n connectButton[\"state\"] = tk.NORMAL\n disconnectButton[\"state\"] = tk.DISABLED\n startButton[\"state\"] = tk.DISABLED\n setButton[\"state\"] = tk.DISABLED\n infoOutput.insert(tk.INSERT, \"AD2 disconnected\\n\")\n infoOutput.see(\"end\")\n\n\ndef setFunction():\n global freq_start\n global freq_end\n global freq_delta\n global resistance\n global amplitude\n freq_start = int(startInput.get())\n freq_end = int(endInput.get())\n freq_delta = int(deltaInput.get())\n resistance = int(resistanceInput.get())\n amplitude = float(amplitudeInput.get())\n\n # this option will enable dynamic adjustment of analog out settings like: frequency, amplitude...\n dwf.FDwfDeviceAutoConfigureSet(hdwf, c_int(3))\n dwf.FDwfAnalogImpedanceReset(hdwf)\n # 0 = W1-C1-DUT-C2-R-GND, 1 = W1-C1-R-C2-DUT-GND, 8 = AD IA adapter\n dwf.FDwfAnalogImpedanceModeSet(hdwf, c_int(8))\n 
dwf.FDwfAnalogImpedanceFrequencySet(\n hdwf, c_double(freq_start)\n ) # frequency in Hertz\n dwf.FDwfAnalogImpedanceReferenceSet(\n hdwf, c_double(resistance)\n ) # reference resistor value in Ohms\n # Measurement amplitude, 0V to peak signal\n dwf.FDwfAnalogImpedanceAmplitudeSet(hdwf, c_double(amplitude))\n time.sleep(1)\n\n infoOutput.insert(tk.INSERT, \"All parameters set\\n\")\n infoOutput.see(\"end\")\n\n\ndef startFunction():\n initial = time.time()\n extfile = open(\"impedance_\" + str(amplitudeInput.get()) + \"V.txt\", \"w\")\n # extfile.write(\"Frequency [hz]\" + \"\\t\" + \"Impedance [Ohm]\" + \"\\t\" + \"Phase [deg]\" + \"\\n\")\n # infoOutput.insert(tk.INSERT, \"Start Measurement\\n\")\n # infoOutput.see('end')\n # time.sleep(1)\n\n dwf.FDwfAnalogImpedanceConfigure(hdwf, c_int(1)) # Measurement start\n\n for hz in range(freq_start, freq_end + 1, freq_delta):\n dwf.FDwfAnalogImpedanceFrequencySet(hdwf, c_double(hz))\n time.sleep(0.01)\n # ignore last capture since we changed the frequency\n dwf.FDwfAnalogImpedanceStatus(hdwf, None)\n\n while True:\n if dwf.FDwfAnalogImpedanceStatus(hdwf, byref(sts)) == 0:\n dwf.FDwfGetLastErrorMsg(szerr)\n print(str(szerr.value))\n quit()\n if sts.value == 2:\n break\n\n dwf.FDwfAnalogImpedanceStatusMeasure(\n hdwf, DwfAnalogImpedanceImpedance, byref(impedance)\n )\n dwf.FDwfAnalogImpedanceStatusMeasure(\n hdwf, DwfAnalogImpedanceImpedancePhase, byref(phase)\n )\n extfile.write(\n str(hz)\n + \"\\t\"\n + str(abs(impedance.value))\n + \"\\t\"\n + str((phase.value / math.pi) * 180.0)\n + \"\\n\"\n )\n # extfile.write(str(hz) + \"\\t\" + str(abs(impedance.value / 1000)) + \"\\t\" + str(phase.value) + \"\\n\")\n\n dwf.FDwfAnalogImpedanceConfigure(hdwf, c_int(0)) # Measurement end\n\n final = time.time()\n infoOutput.insert(tk.INSERT, \"Finished: \" + str(round(final - initial, 2)) + \"s\\n\")\n infoOutput.see(\"end\")\n\n\ndef clearFunction():\n infoOutput.delete(\"1.0\", END)\n\n\ndef quitFunction():\n dwf.FDwfDeviceClose(hdwf)\n win.quit()\n\n\n# endregion\n\n\n# region Window Layout\n# This is the section of code which creates the main window\nw = 540 # width for the Tk root\nh = 300 # height for the Tk root\nws = win.winfo_screenwidth() # width of the screen\nhs = win.winfo_screenheight() # height of the screen\nx = (ws / 2) - (w)\ny = (hs / 2) - (h)\n# set the dimensions of the screen and where it is placed\nwin.geometry(\"%dx%d+%d+%d\" % (w, h, x, y))\nwin.configure(background=\"#F0F8FF\")\nwin.title(\"Impedance Analyzer\")\n\n\n# This is the section of code which creates all labels\nstartLabel = Label(\n win, text=\"Start Freq [Hz]\", bg=\"#F0F8FF\", font=(\"arial\", 12, \"normal\")\n)\nstartLabel.grid(row=1, column=1, sticky=\"W\")\n\nendLabel = Label(win, text=\"End Freq [Hz]\", bg=\"#F0F8FF\", font=(\"arial\", 12, \"normal\"))\nendLabel.grid(row=2, column=1, sticky=\"W\")\n\ndeltaLabel = Label(\n win, text=\"Delta Freq [Hz]\", bg=\"#F0F8FF\", font=(\"arial\", 12, \"normal\")\n)\ndeltaLabel.grid(row=3, column=1, sticky=\"W\")\n\nresistorLabel = Label(\n win, text=\"Resistor [Ohm]\", bg=\"#F0F8FF\", font=(\"arial\", 12, \"normal\")\n)\nresistorLabel.grid(row=4, column=1, sticky=\"W\")\n\namplitudeLabel = Label(\n win, text=\"Amplitude [V]\", bg=\"#F0F8FF\", font=(\"arial\", 12, \"normal\")\n)\namplitudeLabel.grid(row=5, column=1, sticky=\"W\")\n\ninfoLabel = Label(win, text=\"Info\", bg=\"#F0F8FF\", font=(\"arial\", 12, \"normal\"))\ninfoLabel.grid(row=6, column=1, sticky=\"W\")\n\n\n# This is the section of code which creates all text input 
boxes\nstartInput = Entry(win)\nstartInput.insert(END, str(freq_start))\nstartInput.grid(row=1, column=2, padx=20, ipadx=20, sticky=\"W\")\n\nendInput = Entry(win)\nendInput.insert(END, str(freq_end))\nendInput.grid(row=2, column=2, padx=20, ipadx=20)\n\ndeltaInput = Entry(win)\ndeltaInput.insert(END, str(freq_delta))\ndeltaInput.grid(row=3, column=2, padx=20, ipadx=20)\n\nresistanceInput = Entry(win)\nresistanceInput.insert(END, str(resistance))\nresistanceInput.grid(row=4, column=2, padx=20, ipadx=20)\n\namplitudeInput = Entry(win)\namplitudeInput.insert(END, str(amplitude))\namplitudeInput.grid(row=5, column=2, padx=20, ipadx=20)\n\n\n# This is the section of code which creates all text output boxes\ninfoOutput = Text(win, height=5, width=25)\ninfoOutput.grid(row=6, column=2, sticky=\"W\")\n\n\n# This is the section of code which creates all buttons\nconnectButton = Button(\n win,\n text=\"Connect\",\n state=NORMAL,\n bg=\"#F0F8FF\",\n font=(\"arial\", 12, \"normal\"),\n command=connectFunction,\n)\n# connectButton = ttk.Button(win, text = 'Connect')\nconnectButton.grid(row=1, column=3, padx=40, sticky=\"E\", ipadx=25)\n\ndisconnectButton = Button(\n win,\n text=\"Disconnect\",\n state=DISABLED,\n bg=\"#F0F8FF\",\n font=(\"arial\", 12, \"normal\"),\n command=disconnectFunction,\n)\ndisconnectButton.grid(row=2, column=3, padx=40, sticky=\"E\", ipadx=25)\n\nsetButton = Button(\n win,\n text=\"Set Parameters\",\n state=DISABLED,\n bg=\"#F0F8FF\",\n font=(\"arial\", 12, \"normal\"),\n command=setFunction,\n)\nsetButton.grid(row=4, column=3, padx=40, sticky=\"E\", ipadx=25)\n\nstartButton = Button(\n win,\n text=\"Start\",\n state=DISABLED,\n bg=\"#F0F8FF\",\n font=(\"arial\", 12, \"normal\"),\n command=startFunction,\n)\nstartButton.grid(row=5, column=3, padx=40, sticky=\"E\", ipadx=25)\n\nclearButton = Button(\n win,\n text=\"Info Clear\",\n state=NORMAL,\n bg=\"#F0F8FF\",\n font=(\"arial\", 12, \"normal\"),\n command=clearFunction,\n)\nclearButton.grid(row=6, column=3, padx=40, sticky=\"E\", ipadx=25)\n\nquitButton = Button(\n win,\n text=\"Quit\",\n state=NORMAL,\n bg=\"#F0F8FF\",\n font=(\"arial\", 12, \"normal\"),\n command=quitFunction,\n)\nquitButton.grid(row=7, column=3, padx=40, sticky=\"E\", ipadx=25)\n# endregion\n\n\n# Runs the event loop of Tkinter\nwin.mainloop()\n","sub_path":".vscode/modules/ImpedanceAnalyzer_GUI_lin.py","file_name":"ImpedanceAnalyzer_GUI_lin.py","file_ext":"py","file_size_in_byte":8491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"348742625","text":"# coding: utf-8\n# Copyright 2017 video++ Project, SJTU MediaLab\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport vpp.log as log\nfrom vpp.config import CONF\n\nLOG = log.get_logger(__name__, CONF.server_log_file)\n\n# ordered queue, FIFO\npending_queue = []\n\n# unordered queues, query by job_id\nrunning_queue = {} # running jobs\n\n# error jobs, will be re-scheduled later\n# producer: jobtracker, scheduler\n# consumer: 
scheduler\nerror_job_queue = []\n\n# collection of jobs containing error or timeout tasks\n# producer: jobtracker\n# consumer: scheduler\ntask_error_job_queue = []\n\n# done jobs\n# producer: jobtracker\n# consumer: scheduler\ndone_job_queue = []\n","sub_path":"vpp/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"505365732","text":"\"\"\"\nunittests.test_links.py\n~~~~~~~~~~~~~~~~~~~~~~~\n\nLinks object test coverage\n\n:copyright: (c) 2019 by Oleg Butuzov.\n:license: Apache2, see LICENSE for more details.\n\"\"\"\n\n# pylint: disable=redefined-outer-name\n\n# -- Imports -------------------------------------------------------------------\n\nimport pytest\n\nfrom .helpers import Page\n\nfrom deadlinks import (Link, URL)\nfrom deadlinks.status import Status\nfrom deadlinks.exceptions import (\n DeadlinksIgnoredURL,\n DeadlinksRedirectionURL,\n)\n\n# -- Tests ---------------------------------------------------------------------\n\n\n@pytest.fixture(scope=\"module\")\ndef link():\n \"\"\" Return valid config object. \"\"\"\n return Link(\"https://google.com\")\n\n\n@pytest.mark.parametrize(\n 'base, url, expected',\n [\n (\n \"http://localhost:1313/documentation/\",\n \"part1.html\",\n \"http://localhost:1313/documentation/part1.html\",\n ),\n (\n \"http://localhost:1313/documentation\",\n \"part1.html\",\n \"http://localhost:1313/part1.html\",\n ),\n (\n \"http://localhost:1313/documentation\",\n \"../part1.html\",\n \"http://localhost:1313/part1.html\",\n ),\n (\n \"http://localhost:1313/documentation/\",\n \"../part1.html\",\n \"http://localhost:1313/part1.html\",\n ),\n ],\n)\ndef test_url_link(base, url, expected):\n \"\"\" Relative link generation. \"\"\"\n assert Link(base).link(url) == expected\n\n\n@pytest.mark.parametrize(\n 'base, url',\n [\n (\"http://localhost:1313/\", \"http://localhost:3000/\"),\n (\"http://google.com/\", \"http://bing.com/\"),\n (\"http://google.com/\", \"http://google.com.ua/\"),\n (\"http://google.com.ua/\", \"http://google.com\"),\n (\"http://google.com/\", \"http://ww1.google.com\"),\n (\"http://ww1.google.com/\", \"http://www.www.google.com\"),\n ],\n)\ndef test_is_external(base, url):\n \"\"\" External links. \"\"\"\n\n assert Link(base).is_external(URL(url))\n assert Link(url).is_external(URL(base))\n assert Link(base).is_external(Link(url))\n assert Link(url).is_external(Link(base))\n assert Link(base).is_external(url)\n assert Link(url).is_external(base)\n\n\n@pytest.mark.parametrize(\n 'base, url', [*[\n (\"http://localhost:1313/\", 2222),\n (\"http://localhost:1313/\", 2222.1),\n ]])\ndef test_is_external_of_wrong_type(base, url):\n \"\"\" (Mis)Typed external links \"\"\"\n\n with pytest.raises(TypeError):\n assert Link(base).is_external(url)\n\n\ndef test_non_string_message():\n \"\"\" (Mis)Typed external links \"\"\"\n\n with pytest.raises(TypeError):\n Link(\"http://google.com/\").message = 404\n\n\n@pytest.mark.parametrize(\n 'base, url',\n [\n (\"http://www.google.com/\", \"http://google.com\"),\n (\"http://www.www.google.com/\", \"http://www.www.google.com\"),\n (\"http://www.google.com/\", \"http://google.com:80\"),\n (\"https://www.google.com/\", \"https://google.com:443\"),\n (\"https://www.google.com:443/\", \"https://google.com\"),\n ],\n)\ndef test_is_internal_links(base, url):\n \"\"\" Are these links internal to the URL? 
\"\"\"\n\n assert not Link(base).is_external(url)\n assert not Link(url).is_external(base)\n assert not Link(base).is_external(URL(url))\n assert not Link(url).is_external(URL(base))\n\n\ndef test_links(server):\n \"\"\" General testing for link. \"\"\"\n\n url = server.router({\n '^/$': Page('
google').exists(),\n    })\n\n    l = Link(url)\n\n    assert l.exists()\n    assert len(l.links) == 1\n    assert str(l) == url\n    assert l.url() == url\n\n\n@pytest.mark.parametrize(\n    \"url\",\n    [\n        \"localhost\", # no scheme\n        \"http://localhost:4040404\", # no existing domain\n        \"http://:4040404\", # no existing domain\n    ],\n)\ndef test_bad_links(url):\n    assert not Link(url).exists()\n\n\n@pytest.fixture(scope=\"function\")\ndef ignore_domains():\n    \"\"\" Fixture for domains \"\"\"\n    return [\"github.com\"]\n\n\n@pytest.fixture(scope=\"function\")\ndef ignore_pathes():\n    \"\"\" Fixture for pathes. \"\"\"\n    return [\"issues/new\", \"edit/master\", \"commit\"]\n\n\n@pytest.mark.parametrize(\n    \"url\",\n    [\n        \"https://github.com/kubeflow/website/issues/new?title\",\n        \"https://github.com/kubeflow/website/commit/d26bed8d8\",\n        \"https://github.com/kubeflow/website/edit/master/content/docs/\",\n    ],\n)\ndef test_ignored(ignore_domains, ignore_pathes, url):\n    \"\"\" Ignored domains and pathes matching. \"\"\"\n\n    assert Link(url).match_domains(ignore_domains)\n    assert Link(url).match_pathes(ignore_pathes)\n\n\n@pytest.mark.parametrize(\"url\", [\n    \"https://google.com\",\n    \"http://github.com\",\n])\ndef test_is_valid(url):\n    \"\"\" Tests URL for valid (for crawler) format. \"\"\"\n    assert Link(url).is_valid()\n\n\ndef test_eq():\n    \"\"\" Compare two objects. \"\"\"\n\n    assert Link(\"http://google.com\") == Link(\"http://google.com\")\n    assert Link(\"http://google.com\") == \"http://google.com\"\n    assert \"http://google.com\" == Link(\"http://google.com\")\n\n    with pytest.raises(TypeError):\n        Link('http://google.com') == 1 # pylint: disable=expression-not-assigned\n\n\ndef test_referrer():\n    \"\"\" Test referrer. \"\"\"\n\n    l = Link(\"https://made.ua\")\n    referrer = \"https://google.com\"\n    l.add_referrer(referrer)\n    l.add_referrer(referrer)\n\n    assert referrer in l.get_referrers()\n\n\ndef test_match_domain():\n    \"\"\" Domain matching. 
\"\"\"\n\n l = Link(\"https://made.ua\")\n assert l.match_domains([\"made.ua\"])\n assert not l.match_domains([\"google.com\"])\n\n\n@pytest.mark.timeout(2)\ndef test_existing_page(server):\n \"\"\" emulating slow server (responds after 1s) \"\"\"\n\n address = server.router({\n '^/$': Page(\"\").slow().exists(),\n })\n\n l = Link(address)\n assert l.status == Status.UNDEFINED\n assert l.exists()\n l.status = Status.FOUND\n assert l.exists()\n\n with pytest.raises(TypeError):\n l.status = 1\n\n\n@pytest.mark.timeout(3)\ndef test_not_existing_page(server):\n \"\"\" emulating slow broken server \"\"\"\n\n address = server.router({\n '^/$': Page(\"\").unlock_after(3).slow().exists(),\n })\n\n l = Link(address)\n assert l.status == Status.UNDEFINED\n\n # timed out\n assert not l.exists(retries=2)\n # setting new status\n l.status = Status.NOT_FOUND\n\n # page is unlocked, but response is cached!\n assert not l.exists()\n\n with pytest.raises(TypeError):\n l.status = 2\n\n\ndef test_redirected_page(server):\n \"\"\" Should raise IgnoredURL if Ignored \"\"\"\n address = server.router({\n '^/$': Page(\"\").redirects(pattern=\"https://google.com/?%s\"),\n })\n\n l = Link(address)\n assert l.status == Status.UNDEFINED\n with pytest.raises(DeadlinksRedirectionURL):\n l.exists()\n\n with pytest.raises(TypeError):\n l.status = 0\n\n\ndef test_ignored_page(server):\n \"\"\" Should raise IgnoredURL if Ignored \"\"\"\n address = server.router({\n '^/$': Page(\"\").exists(),\n })\n\n l = Link(address)\n assert l.status == Status.UNDEFINED\n l.status = Status.IGNORED\n assert l.status == Status.IGNORED\n with pytest.raises(DeadlinksIgnoredURL):\n l.exists()\n\n with pytest.raises(TypeError):\n l.status = 3\n\n\ndef test_same_url(server):\n page = \"same link, a\"\n address = server.router({\n '^/$': Page(page.format(*server.sa)).exists(),\n '^/link$': Page(\"ok\").exists(),\n })\n l = Link(address)\n assert l.exists()\n assert address in l.links\n\n\ndef test_not_available_page():\n \"\"\" ok server, but ip with error \"\"\"\n\n l = Link(\"http://127.0.0.1:79\")\n assert l.status == Status.UNDEFINED\n assert not l.exists()\n assert \"Failed to establish a new connection\" in l.message\n","sub_path":"unittests/test_links.py","file_name":"test_links.py","file_ext":"py","file_size_in_byte":7752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"589710462","text":"from django import template\n\nregister = template.Library()\n# {% load my_extras %} in template\n@register.filter(name='cutOne')\ndef cutOne(value,arg):\n \"\"\"\n this cuts out all values of 'arg'\n \"\"\"\n return value.replace(arg,'Redacted')\n\n# register.filter('cut',cut)\n","sub_path":"blog_project/mysite/blog/templates/blog/learning_templates/basic_app/templatetags/my_extras.py","file_name":"my_extras.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"279671662","text":"import datetime, requests, time\nimport pandas as pd\n\nfrom .helper import states, crime_types, data_types\nclass StateReport:\n final_data = {}\n def __init__(self, year='', filename=''):\n if year == '':\n year = str(datetime.datetime.now().year)\n else:\n year = str(year)\n self.raw_data_url= f\"https://www.ic3.gov/media/annualreport/{year}State/stats?s=\"\n self.states = states\n self.crime_types = crime_types\n self.filetime = datetime.datetime.now().strftime(\"%m_%d_%Y_%H_%M\")\n self.filename = filename\n print(f\"Url 
is {self.raw_data_url}\")\n \n def populate_state_data(self, data):\n state_data = {}\n state_data[data_types[0]] = dict(zip(self.crime_types, data[0][0] + data[0][1]))\n state_data[data_types[1]] = dict(zip(self.crime_types, data[0][2] + data[0][3]))\n state_data[data_types[2]] = dict(zip(self.crime_types, data[0][4] + data[0][5]))\n state_data[data_types[3]] = dict(zip(self.crime_types, data[0][6] + data[0][7]))\n return state_data\n \n def run(self, parse_sleep=.1):\n print(\"Starting to parse...\")\n # Initial call to print 0% progress\n length = len(self.states)\n self.printProgressBar(0, length, prefix = 'Progress:', suffix = 'Complete', length = length)\n try:\n r = requests.get(self.raw_data_url+str(1))\n except:\n pass\n for i,v in enumerate(states, 1):\n r = requests.get(self.raw_data_url+str(i))\n if r.ok:\n data = r.json()\n self.final_data[v] = self.populate_state_data(data)\n time.sleep(parse_sleep)\n self.printProgressBar(i + 1, length, prefix = 'Progress:', suffix = 'Complete', length = length)\n else:\n print(\"r.ok\", r.ok)\n assert False, \"Couldn't get response from server try running url with hand\"\n print(\"Finished parsing...\")\n print(\"You can access raw data with final_data or Extract using extract\")\n \n def extract(self, sheeted=True, to=''):\n \"\"\"\n Create excel file\n sheeted: If true create new sheet for every country\n to: XLSX, CSV, XLS \n \"\"\" \n if to == '':\n to = 'xlsx'\n print(f\"Extracting file... [sheeted={sheeted}|to={to}]\")\n if sheeted: #write xlsx anyway\n filename = self.filetime + '.xlsx'\n writer = pd.ExcelWriter(filename, engine='xlsxwriter')\n for state,d in self.final_data.items():\n # states.append(state)\n df = pd.DataFrame.from_dict(d)\n if len(state) >= 31:\n state = state[:30]\n df.to_excel(writer, sheet_name=state)\n writer.save()\n print(f\"Extracted sheeted to {filename}\")\n else: #grouped\n states = []\n frames = []\n filename = self.filetime + '.' 
+ to\n for state,d in self.final_data.items():\n states.append(state)\n frames.append(pd.DataFrame.from_dict(d))\n country_based = pd.concat(frames, keys=states)\n if to == 'xlsx':\n country_based.to_excel(filename, engine='xlsxwriter')\n elif to == 'csv':\n country_based.to_csv(filename)\n print(f\"Extracted to {filename}\")\n \n def printProgressBar (self, iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n \"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n length - Optional : character length of bar (Int)\n fill - Optional : bar fill character (Str)\n \"\"\"\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total: \n print()\n\n \n","sub_path":"statereport/state_report.py","file_name":"state_report.py","file_ext":"py","file_size_in_byte":4510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"579015029","text":"import plotly.graph_objects as go\nimport plotly.express as px\n\n\ndef QuickTable():\n\n headerColor = 'grey'\n rowEvenColor = 'lightgrey'\n rowOddColor = 'white'\n\n fig = go.Figure(data=[go.Table(\n header=dict(\n values=['EXPENSES','Q1','Q2','Q3','Q4'],\n line_color='darkslategray',\n fill_color=headerColor,\n align=['left','center'],\n font=dict(color='white', size=12)\n ),\n cells=dict(\n values=[\n ['Salaries', 'Office', 'Merchandise', 'Legal', 'TOTAL'],\n [1200000, 20000, 80000, 2000, 12120000],\n [1300000, 20000, 70000, 2000, 130902000],\n [1300000, 20000, 120000, 2000, 131222000],\n [1400000, 20000, 90000, 2000, 14102000]],\n line_color='darkslategray',\n # 2-D list of colors for alternating rows\n fill_color = [[rowOddColor,rowEvenColor,rowOddColor, rowEvenColor,rowOddColor]*5],\n align = ['left', 'center'],\n font = dict(color = 'darkslategray', size = 11)\n ))\n ])\n\n # #fig.show()\n # x= fig.write_image(\"images/fig1.png\")\n\n return fig.to_image(format=\"png\")\n\n\nif __name__ == \"__main__\":\n from IPython.display import Image\n\n Image(QuickTable())\n \n\n # pic = QuickTable()\n # pic.show()\n","sub_path":"src/Plotly_Test.py","file_name":"Plotly_Test.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"473354529","text":"def _foo_binary_impl(ctx):\n out = ctx.actions.declare_file(ctx.label.name)\n ctx.actions.write(\n output = out,\n content = \"Hello {}!\\n\".format(ctx.attr.username),\n )\n return [DefaultInfo(files = depset([out]))]\n \nfoo_binary = rule(\n implementation = _foo_binary_impl,\n attrs = {\n \"username\": attr.string(),\n },\n)\n\nprint(\"bzl file evaluation\")","sub_path":"main/foo.bzl","file_name":"foo.bzl","file_ext":"bzl","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"19537630","text":"matrix = [\n [1,2,3],\n [4,5,6],\n [7,8,9]\n]\n\n#row contains the first list e.g: [1,2,3]\nfor row in matrix:\n for item in row:\n 
print(item)\n","sub_path":"2dlistloop.py","file_name":"2dlistloop.py","file_ext":"py","file_size_in_byte":154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"592700198","text":"import math\nclass Solution:\n\n    def isPalindromeLog(self, x):\n        if x < 0:\n            return False\n        if x == 0:\n            return True\n        digits = math.floor(math.log10(x))\n        for i in range(digits + 1):\n            f = math.floor(x / pow(10, digits - i)) % 10\n            l = math.floor(x / pow(10, i)) % 10\n            if f != l:\n                return False\n        return True\n\n    def isPalindrome(self, x):\n        if x < 0:\n            return False\n        i = res = 0\n        tmp_x = x\n        while tmp_x:\n            tmp_x, r = divmod(tmp_x, 10)\n            res = res * 10 + r\n        return res == x\n\ndef main():\n    tests = [\n        [12321, True],\n        [111, True],\n        [19, False],\n        [0, True],\n        [10000000, False],\n        [100000001, True],\n        [-2147483648, False],\n        [-1, False],\n    ]\n    s = Solution()\n    for t in tests:\n        assert s.isPalindromeLog(t[0]) == t[1]\n        assert s.isPalindrome(t[0]) == t[1]\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"python/9_is_palindrome.py","file_name":"9_is_palindrome.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"439805021","text":"import wget\nimport json\n\ndef load_data():\n    url = 'https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json'\n    filename = wget.download(url)\n\n    with open(filename, 'r') as f:\n        source_data = json.load(f)\n    return source_data","sub_path":"datasets/squad.py","file_name":"squad.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"170887819","text":"users = [\n    {\"id\": 0, \"name\": \"Hero\"}, {\"id\": 1, \"name\": \"Dunn\"}, {\"id\": 2, \"name\": \"Sue\"}, {\"id\": 3, \"name\": \"Chi\"},\n    {\"id\": 4, \"name\": \"Thor\"}, {\"id\": 5, \"name\": \"Clive\"}, {\"id\": 6, \"name\": \"Hicks\"}, {\"id\": 7, \"name\": \"Devin\"},\n    {\"id\": 8, \"name\": \"Kate\"}, {\"id\": 9, \"name\": \"Klein\"},\n]\n\nfriendship_pairs = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4), (4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]\n# scanning data in this form is time-consuming, so we will turn it into a dictionary where\n# the keys are user ids and the values are lists of the ids of the users they are friends with, e.g. {0:[1,2], 1:[0,2,3],..}\n# To do this we have to walk once over the users and friendship_pairs structures above.\n# Note that it is worth being sure (and here we can be sure) that each id is unique within the users list.\n\n# initialize the dictionary with an empty list for every user id:\nfriendships = {user[\"id\"]: [] for user in users}\n\nfor i, j in friendship_pairs:\n    print(i, \" \", j)\n\nfor i, j in friendship_pairs:\n    friendships[i].append(j)\n    friendships[j].append(i)\n\n\n# the function takes a user dict from the users list and returns the length of that user's list in the friendships dictionary\ndef number_of_friends(user: dict) -> int:\n    user_id = user[\"id\"]\n    f_ids = friendships[user_id]\n    return len(f_ids)\n\n\n# a function returning the length of the list from the friendships dictionary\ndef number_of_friends1(key: int) -> int:\n    return len(friendships[key])\n\n\n# a list of users with their friend counts, e.g. [(0,2), (1,3)]\nnum_friends_by_id = [(user[\"id\"], number_of_friends(user)) for user in users]\nnum_friends_by_id1 = [(k, number_of_friends1(k)) for k in friendships]\n\n\n# sort num_friends_by_id by number_of_friends\nnum_friends_by_id.sort(key=lambda id_and_friends: id_and_friends[1], reverse=True)\n","sub_path":"data_science/ds_1.py","file_name":"ds_1.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"523229186","text":"import os\r\nimport googlemaps\r\nimport time\r\nimport logging\r\nfrom datetime import datetime\r\n\r\nlogger = logging.getLogger('air_to_tx_ctr')\r\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')\r\n\r\ngmaps = googlemaps.Client(key='AIzaSyDkJsLTaJLAv3z2vf_voSqcNIv6DkH_6Q0')\r\n\r\ninput_file = \"Y:/LSAM/PotentialChallenges/TransTime/airport_to_tx_ctr_v2_timezone.tsv\"\r\nexpectHours = set([8, 10, 12, 14, 17, 22])\r\nlocalTimeZone = -6\r\n\r\nestimated = {}\r\nheaders=\"\"\r\nwith open(input_file) as f:\r\n    headers=f.readline().rstrip()\r\n    for line in f:\r\n        parts = line.rstrip().split('\\t')\r\n        uniqueId = parts[1] + \"->\" + parts[5]\r\n        estimated[uniqueId] = {\"parts\":parts,\r\n                               \"timezone\":int(parts[8]) - localTimeZone,\r\n                               \"hourmap\":{hour:False for hour in expectHours}}\r\n\r\ndef checkAllDone(estimated):\r\n    notDone = 0\r\n    total = 0\r\n    for cityMap in estimated.values():\r\n        for done in cityMap[\"hourmap\"].values():\r\n            total += 1\r\n            if not done:\r\n                notDone += 1\r\n    logger.info(\"%d out of %d queries in waiting list\\n\" % (notDone, total))\r\n    return (notDone == 0)\r\n\r\ndef getResultFile():\r\n    dt = datetime.now()\r\n    curtime = \"%d_%d_%d\" % (dt.year, dt.month, dt.day)\r\n    return (\"Y:/LSAM/PotentialChallenges/TransTime/airport_to_tx_ctr_v3_with_distance_and_time_by_departure_time_%s.tsv\" % curtime)\r\n\r\nlastFile = \"\"\r\nwhile(True):\r\n    result_file = getResultFile()\r\n    if lastFile != result_file:\r\n        logger.info(\"Saving result to %s\\n\" % result_file)\r\n        lastFile = result_file\r\n\r\n        for value in estimated.values():\r\n            value[\"hourmap\"] = {hour:False for hour in expectHours}\r\n\r\n    found = []\r\n    bEntryDeleted = False\r\n    if os.path.isfile(result_file):\r\n        with open(result_file) as f:\r\n            found = f.readlines()\r\n        if len(found) > 0:\r\n            del found[0]\r\n        if len(found) > 0:\r\n            parts = found[-1].split('\\t')\r\n            if len(parts) < 14:\r\n                del found[-1]\r\n                bEntryDeleted = True\r\n        for line in found:\r\n            parts = line.rstrip().split('\\t')\r\n            uniqueId = parts[1] + \"->\" + parts[5]\r\n            hourMap = 
estimated[uniqueId][\"hourmap\"]\r\n hourMap[int(parts[10])] = True\r\n\r\n if checkAllDone(estimated):\r\n logger.info(\"Today all done, waiting 10 miniutes\\n\")\r\n time.sleep(600)\r\n continue\r\n\r\n if len(found) == 0:\r\n with open(result_file, 'w') as sw:\r\n sw.write(headers + \"\\tair2tx_ctr_nashville_time\\tair2tx_ctr_localhour\\tair2tx_ctr_distance\\tair2tx_ctr_google_normal\\tair2tx_ctr_google_traffic\\n\")\r\n else:\r\n if bEntryDeleted:\r\n with open(result_file, 'w') as sw:\r\n sw.write(headers + \"\\tair2tx_ctr_nashville_time\\tair2tx_ctr_localhour\\tair2tx_ctr_distance\\tair2tx_ctr_google_normal\\tair2tx_ctr_google_traffic\\n\")\r\n for line in found:\r\n sw.write(line)\r\n\r\n while not checkAllDone(estimated):\r\n if result_file != getResultFile():\r\n break;\r\n\r\n with open(result_file, 'a') as sw:\r\n for cityMap in estimated.values():\r\n hourmap = cityMap[\"hourmap\"]\r\n for hour, done in hourmap.items():\r\n dt = datetime.now()\r\n curhour = dt.hour\r\n remotehour = (curhour + cityMap[\"timezone\"] + 24) % 24\r\n curtime = \"%d-%d-%d:%d:%d\" % (dt.year, dt.month, dt.day, dt.hour, dt.minute)\r\n if not done and remotehour == hour:\r\n parts = cityMap[\"parts\"]\r\n hospital_address = parts[6]\r\n air_address = parts[7]\r\n print(\"%d : %s => %s\" %(remotehour, air_address, hospital_address))\r\n res = gmaps.distance_matrix([air_address], [hospital_address] , mode=\"driving\", departure_time=dt)\r\n if res[\"status\"] == \"OK\":\r\n actual_res = res[\"rows\"][0][\"elements\"][0]\r\n if actual_res[\"status\"] == \"OK\":\r\n if \"duration_in_traffic\" in actual_res:\r\n sw.write(\"%s\\t%s\\t%d\\t%s\\t%.6f\\t%.6f\\n\" % (\"\\t\".join(parts), curtime, remotehour, actual_res[\"distance\"][\"text\"], actual_res[\"duration\"][\"value\"] / 3600.0, actual_res[\"duration_in_traffic\"][\"value\"] / 3600.0 ))\r\n else:\r\n sw.write(\"%s\\t%s\\t%d\\t%s\\t%.6f\\t\\n\" % (\"\\t\".join(parts), curtime, remotehour, actual_res[\"distance\"][\"text\"], actual_res[\"duration\"][\"value\"] / 3600.0 ))\r\n else:\r\n sw.write(\"%s\\t%s\\t%d\\t\\t\\t\\n\" % (\"\\t\".join(parts), curtime, remotehour ))\r\n else:\r\n sw.write(\"%s\\t%d\\t\\t\\t\\n\" % (\"\\t\".join(parts), curtime, remotehour))\r\n hourmap[hour]=True\r\n time.sleep(2)\r\n logger.info(\"waiting 2 miniutes\\n\")\r\n time.sleep(120)","sub_path":"shyr/20161020_lsam/step08_get_air_tx_ctr_hour_by_departuretime.py","file_name":"step08_get_air_tx_ctr_hour_by_departuretime.py","file_ext":"py","file_size_in_byte":4622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"454069443","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nfrom os.path import join, isfile, basename\nfrom os import listdir\n\nfrom unify_definition import unify_stroke\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nimport pickle\nimport json\n\ndef get_letters(path):\n files = [join(path, f) for f in listdir(path) if isfile(join(path, f))]\n letters = {}\n for f in files:\n data = np.loadtxt(f)\n key = basename(f)[0]\n if not key in letters:\n letters[key] = []\n letters[key].append(data)\n return letters\n\ndef make_core(letters, points):\n core = {}\n for key, letters_group in letters.items():\n unified = np.array([unify_stroke(letter, points) for letter in letters_group])\n\n centers = np.mean(unified, axis=0)\n\n R = []\n\n for u in unified:\n dists = u - centers\n radius = np.linalg.norm(dists, axis=1)\n R.append(radius)\n x = u[:, 0]\n y = u[:, 1]\n z = u[:, 2]\n plt.plot(x/(1+y), 
-z/(1+y), '.-.' )\n\n        R = np.max(np.array(R), axis=0)\n\n        core[key] = np.hstack((centers, np.array([R]).T))\n        x = centers[:, 0]\n        y = centers[:, 1]\n        z = centers[:, 2]\n        plt.plot(x/(1+y), -z/(1+y) , linewidth=5)\n        #plt.show()\n    return core\n\nif __name__ == '__main__':\n    letters = get_letters('letters')\n    \n    segmentation = 32\n    core = make_core(letters, segmentation)\n    dump_data = {'segmentation': segmentation}\n    dump_data['letters'] = {key: data.tolist() for key, data in core.items()}\n    with open('core.txt', 'w') as f:\n        json.dump(dump_data, f, indent=1)","sub_path":"proof-of-concept/make_core.py","file_name":"make_core.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"572812494","text":"import math\n# TO-DO: complete the helper function below to merge 2 sorted arrays\n\n\ndef merge(arrA, arrB):\n    print(f'run: {arrA}, {arrB}')\n    elements = len(arrA) + len(arrB)\n    merged_arr = [0] * elements\n\n    # # TO-DO\n    i, j, k = 0, 0, 0 # i= arrA counter, j= arrB counter, k= arr counter\n\n    # run until left or right is out\n    while i < len(arrA) and j < len(arrB):\n        # if current arrA val is < current arrB val; assign to master list\n        if arrA[i] < arrB[j]:\n            merged_arr[k] = arrA[i]\n            i += 1\n            k += 1\n        # else assign arrB to master\n        else:\n            merged_arr[k] = arrB[j]\n            j += 1\n            k += 1\n\n    # handle remaining items in whichever list was not exhausted\n    remaining = arrA if i < len(arrA) else arrB\n    r = i if remaining is arrA else j\n\n    while r < len(remaining):\n        merged_arr[k] = remaining[r]\n        r += 1\n        k += 1\n\n    print(f'merged_arr: {merged_arr}')\n\n    return merged_arr\n\n\n# TO-DO: implement the Merge Sort function below USING RECURSION\ndef merge_sort(arr):\n    # TO-DO\n    result = []\n    if len(arr) == 0:\n        return result\n    elif len(arr) == 1:\n        return arr\n    else:\n        middle = int(len(arr)/2)\n        print(f'middle: {middle}')\n        arr1 = merge_sort(arr[:middle])\n        arr2 = merge_sort(arr[middle:])\n        print(f'arr1: {arr1}, arr2: {arr2}')\n        result = merge(arr1, arr2)\n    return result\n\n# STRETCH: implement an in-place merge sort algorithm\n\n\ndef merge_in_place(arr, start, mid, end):\n    # TO-DO\n    middle = mid\n    print(f'middle: {middle}')\n    arrA = merge_sort(arr[:middle])\n    arrB = merge_sort(arr[middle:])\n    print(f'arr1: {arrA}, arr2: {arrB}')\n\n    i, j, k = 0, 0, 0 # i= arrA counter, j= arrB counter, k= arr counter\n\n    # run until left or right is out\n    while i < len(arrA) and j < len(arrB):\n        # if current arrA val is < current arrB val; assign to master list\n        if arrA[i] < arrB[j]:\n            arr[k] = arrA[i]\n            i += 1\n            k += 1\n        # else assign arrB to master\n        else:\n            arr[k] = arrB[j]\n            j += 1\n            k += 1\n\n    # handle remaining items in whichever list was not exhausted\n    remaining = arrA if i < len(arrA) else arrB\n    r = i if remaining is arrA else j\n\n    while r < len(remaining):\n        arr[k] = remaining[r]\n        r += 1\n        k += 1\n    return arr\n\n\ndef merge_sort_in_place(arr, l, r):\n    # TO-DO\n    result = []\n    if len(arr) == 0:\n        return result\n    elif len(arr) == 1:\n        return arr\n    else:\n        return merge_in_place(arr, l, int(len(arr)/2), (r + 1))\n\n# STRETCH: implement the Timsort function below\n# hint: check out https://github.com/python/cpython/blob/master/Objects/listsort.txt\n\n\ndef timsort(arr):\n\n    return arr\n\n\nlistTest = [0, 3, 4, 1, 2, 3, 4, 5, 7]\nprint(merge_sort(listTest))\n# print(merge_sort_in_place(listTest, 0, 
len(listTest)-1))\n","sub_path":"src/recursive_sorting/recursive_sorting.py","file_name":"recursive_sorting.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"153766306","text":"from behave import step\n\nfrom .forms import get_edit_form\nfrom .forms import clear_input_and_send_keys\n\nfrom .buttons import clicketi_click\nfrom .buttons import click_button_from_collection\n\nfrom .utils import safe_get_element_text, get_page_element, expand_shadow_root\n\nfrom time import sleep\n\n\ndef add_new_rule(context, operator, rtype='all', raction='all', rid='',\n rtags=''):\n operator = operator.lower()\n rtype = rtype.lower()\n raction = raction.lower()\n rid = rid.lower()\n rtags = rtags.lower()\n\n _, team_page = get_page_element(context, 'teams', 'team')\n team_page_shadow = expand_shadow_root(context, team_page)\n form = team_page_shadow.find_element_by_css_selector('team-policy')\n form_shadow = expand_shadow_root(context, form)\n new_rule = form_shadow.find_element_by_css_selector(\n '#rules > rbac-rule-item:nth-last-child(2)')\n new_rule_shadow = expand_shadow_root(context, new_rule)\n\n if operator not in ['allow', 'deny']:\n raise Exception('Operator must be either allow or deny')\n\n if operator == 'allow':\n toggle_button = new_rule_shadow.find_element_by_css_selector('paper-toggle-button')\n clicketi_click(context, toggle_button)\n\n if rtype != 'all':\n rtype_drop = new_rule_shadow.\\\n find_element_by_css_selector('span.resource').\\\n find_element_by_css_selector('paper-dropdown-menu')\n clicketi_click(context, rtype_drop)\n sleep(1)\n click_button_from_collection(context, rtype,\n rtype_drop.find_elements_by_css_selector('paper-item'))\n\n if raction != 'all':\n raction_drop = new_rule_shadow.\\\n find_element_by_css_selector('span.action').\\\n find_element_by_css_selector('paper-dropdown-menu')\n clicketi_click(context, raction_drop)\n sleep(3)\n click_button_from_collection(context, raction,\n raction_drop.find_elements_by_css_selector('paper-item'))\n rule_identifier = new_rule_shadow.find_element_by_css_selector('rbac-rule-identifier')\n rule_identifier_shadow = expand_shadow_root(context, rule_identifier)\n\n rcondition = rule_identifier_shadow.find_element_by_css_selector('paper-dropdown-menu')\n\n if rid:\n clicketi_click(context, rcondition)\n sleep(1)\n click_button_from_collection(context, 'where id',\n rcondition.find_elements_by_css_selector('paper-item'))\n sleep(1)\n rid_drop = rule_identifier_shadow. \\\n find_elements_by_css_selector('paper-dropdown-menu')[-1]\n clicketi_click(context, rid_drop)\n sleep(1)\n click_button_from_collection(context, rid,\n rid_drop.find_elements_by_css_selector('paper-item'))\n\n if rtags:\n clicketi_click(context, rcondition)\n sleep(1)\n click_button_from_collection(context, 'where tags',\n rcondition.find_elements_by_css_selector('paper-item'))\n sleep(3)\n edit_icon = rule_identifier_shadow. 
\\\n find_element_by_css_selector('.edit')\n clicketi_click(context, edit_icon)\n clicketi_click(context, edit_icon)\n paper_input = rule_identifier_shadow.find_element_by_css_selector('paper-input#inputField')\n paper_input_shadow = expand_shadow_root(context, paper_input)\n input_element = paper_input_shadow.find_element_by_css_selector('input')\n input_element.send_keys(rtags)\n\n\n@step(u'I add the rule \"{operator}\" \"{rtype}\" \"{raction}\" where id = \"{rid}\"')\ndef add_new_rule_with_rid(context, operator, rtype, raction, rid):\n add_new_rule(context, operator, rtype, raction, rid)\n\n\n@step(u'I add the rule \"{operator}\" \"{rtype}\" \"{raction}\" where tags = '\n u'\"{rtags}\"')\ndef add_new_rule_with_rtags(context, operator, rtype, raction, rtags):\n add_new_rule(context, operator, rtype, raction, rtags=rtags)\n\n\n@step(u'I add the rule always \"{operator}\" \"{rtype}\" \"{raction}\"')\ndef add_new_rule_always(context, operator, rtype, raction):\n add_new_rule(context, operator, rtype, raction)\n\n\n@step(u'I remove the rule with index \"{index}\"')\ndef delete_rule(context, index):\n _, team_page = get_page_element(context, 'teams', 'team')\n team_page_shadow = expand_shadow_root(context, team_page)\n form = team_page_shadow.find_element_by_css_selector('team-policy')\n form_shadow = expand_shadow_root(context, form)\n rules = [expand_shadow_root(context, rule) for rule in form_shadow.find_elements_by_css_selector('rbac-rule-item')]\n for rule in rules:\n index_class = rule.find_element_by_css_selector('.index')\n rule_index = safe_get_element_text(index_class)\n rule_index = rule_index.replace('.','')\n if rule_index == index:\n delete_btn = rule.find_element_by_css_selector('.delete')\n icon = delete_btn.find_element_by_css_selector('iron-icon')\n clicketi_click(context, icon)\n return\n assert False, \"There is no rule with index %s\" % index\n\n\ndef check_rule_exists(context, rule_number, operator, rtype, raction, rid, rtags):\n rule_number = int(rule_number)\n operator = operator.lower()\n if operator not in ['allow', 'deny']:\n raise Exception('Operator must be either allow or deny')\n rtype = rtype.lower()\n raction = raction.lower()\n rid = rid.lower()\n rtags = rtags.lower()\n\n _, team_page = get_page_element(context, 'teams', 'team')\n team_page_shadow = expand_shadow_root(context, team_page)\n form = team_page_shadow.find_element_by_css_selector('team-policy')\n form_shadow = expand_shadow_root(context, form)\n rules = [expand_shadow_root(context, rule) for rule in form_shadow.find_elements_by_css_selector('rbac-rule-item')]\n rule = rules[rule_number]\n rule_operator = safe_get_element_text(\n rule.find_element_by_css_selector('span.operator')).strip().lower()\n assert operator == rule_operator, \"Operator is not %s\" % operator\n\n rule_resource = rule.find_element_by_css_selector('span.resource').\\\n find_element_by_css_selector('input#input').get_attribute('value').\\\n strip().lower()\n\n assert rtype == rule_resource, \"Resource type is not %s\" % rtype\n\n rule_action = rule.find_element_by_css_selector('span.action').\\\n find_element_by_css_selector('input#input').get_attribute('value').\\\n strip().lower()\n\n assert raction == rule_action, \"Rule action is not %s\" % raction\n\n rcondition = rule.find_element_by_css_selector('span.identifier').\\\n find_elements_by_css_selector('input#input')[0].get_attribute('value').\\\n strip().lower()\n\n if not rid and not rtags:\n assert rcondition == 'always', \"Rule condition is not always\"\n\n if rid:\n 
assert rcondition == 'where id', \"Rule condition is not always\"\n rule_id = rule.find_element_by_css_selector('span.identifier'). \\\n find_elements_by_css_selector('input#input')[1].\\\n get_attribute('value').strip().lower()\n assert rid == rule_id, \"Rule id is not %s\" % rid\n\n if rtags:\n assert rcondition == 'where tags', \"Rule condition is not always\"\n rule_tags = rule.find_element_by_css_selector('span.identifier'). \\\n find_elements_by_css_selector('input#input')[2].\\\n get_attribute('value').strip().lower()\n assert rtags == rule_tags, \"Rule tag is not %s\" % rtags\n\n\n@step(u'rule \"{rule_number}\" is \"{operator}\" \"{rtype}\" \"{raction}\" where tags'\n u' = \"{rtags}\"')\ndef check_rule_with_rtags(context, rule_number, operator, rtype, raction, rtags):\n check_rule_exists(context, rule_number, operator, rtype, raction, '', rtags)\n\n\n@step(u'rule \"{rule_number}\" is \"{operator}\" \"{rtype}\" \"{raction}\" where id = '\n u'\"{rid}\"')\ndef check_rule_with_rid(context, rule_number, operator, rtype, raction, rid):\n check_rule_exists(context, rule_number, operator, rtype, raction, rid, '')\n\n\n@step(u'rule \"{rule_number}\" is \"{operator}\" \"{rtype}\" \"{raction}\" always')\ndef check_rule_always(context, rule_number, operator, rtype, raction):\n check_rule_exists(context, rule_number, operator, rtype, raction, '', '')\n","sub_path":"misttests/integration/gui/steps/policy.py","file_name":"policy.py","file_ext":"py","file_size_in_byte":8250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"469296200","text":"import json\nimport unittest\nfrom datetime import datetime\n\nfrom tests.mock.mock_rest import MockRest\nfrom timbermill import timberlog_event_handler, timberlog\nfrom timbermill import timberlog_consts as consts\nfrom timbermill.timberlog import timberlog_start\n\n\nclass TestTimberlog(unittest.TestCase):\n def setUp(self):\n timberlog.init('test')\n mock_rest = MockRest()\n timberlog_event_handler.rest_client = mock_rest\n timberlog_event_handler.ENABLE_PYTHORAMA_TIMBERMILL_2 = True\n\n def test_start_task(self):\n timberlog.start(\"test_start_task\")\n timberlog.add_context(attr1='attr1', attr2='attr2')\n timberlog.success()\n\n start_task = self.check_event_type(consts.EVENT_TYPE_START)\n info_task = self.check_event_type(consts.EVENT_TYPE_INFO)\n success_task = self.check_event_type(consts.EVENT_TYPE_END_SUCCESS)\n start_task_id, __ = self.get_task_id(start_task)\n info_task_id, __ = self.get_task_id(info_task)\n success_task_id, __ = self.get_task_id(success_task)\n self.assertEqual(start_task_id, info_task_id)\n self.assertEqual(start_task_id, success_task_id)\n\n def test_spot_task(self):\n timberlog.spot(\"test_spot\", context={'status': 'OK'}, metrics={'test_num': 2, 'key': 102})\n spot_task = self.check_event_type(consts.EVENT_TYPE_SPOT)\n task_id, parent_id = self.get_task_id(spot_task)\n self.assertEqual(consts.EVENT_TYPE_SPOT, spot_task['@type'])\n\n def test_with_statement(self):\n with timberlog.start_task(\"task_with_statement\") as timberlogContext:\n timberlogContext.info(context={'status': 'OK'})\n self.check_event_type(consts.EVENT_TYPE_START)\n self.check_event_type(consts.EVENT_TYPE_INFO)\n self.check_event_type(consts.EVENT_TYPE_END_SUCCESS)\n\n def test_end_with_error(self):\n self.assertRaises(Exception, self.end_with_error_func)\n self.check_event_type(consts.EVENT_TYPE_START)\n self.check_event_type(consts.EVENT_TYPE_INFO)\n self.check_event_type(consts.EVENT_TYPE_INFO)\n 
self.check_event_type(consts.EVENT_TYPE_END_ERROR)\n\n def test_info_without_start_task(self):\n timberlog.add_context(attr1='attr1', attr2='attr2')\n event_task = self.check_event_type(consts.EVENT_TYPE_SPOT)\n self.assertEqual(event_task['name'], consts.LOG_WITHOUT_CONTEXT)\n\n def test_end_without_start_task(self):\n timberlog.success()\n event_task = self.check_event_type(consts.EVENT_TYPE_SPOT)\n self.assertEqual(event_task['name'], consts.END_WITHOUT_START)\n\n def test_inner_task(self):\n with timberlog.start_task(\"first_task\") as first_timberlog_context:\n first_timberlog_context.info(context={'attr1': 'attr1'})\n with timberlog.start_task(\"second_task\") as second_timberlog_context:\n second_timberlog_context.info(context={'attr2': 'attr2'})\n\n first_start_task = self.check_event_type(consts.EVENT_TYPE_START)\n self.check_event_type(consts.EVENT_TYPE_INFO)\n second_start_task = self.check_event_type(consts.EVENT_TYPE_START)\n self.check_event_type(consts.EVENT_TYPE_INFO)\n self.check_event_type(consts.EVENT_TYPE_END_SUCCESS)\n first_success_task = self.check_event_type(consts.EVENT_TYPE_END_SUCCESS)\n\n first_task_id, first_parent_id = self.get_task_id(first_start_task)\n second_task_id, second_parent_id = self.get_task_id(second_start_task)\n self.assertEqual(first_task_id, second_parent_id)\n first_end_task_id, __ = self.get_task_id(first_success_task)\n self.assertEqual(first_task_id, first_end_task_id)\n\n @timberlog_start\n def test_decorator(self):\n tl.info(context={'attr1': 'attr1'})\n\n def test_datetime_format(self):\n current_time = timberlog_event_handler.get_current_time_formatted()\n plus_days = 7\n current_time_plus_7_days = timberlog_event_handler.get_current_time_formatted(plus_days)\n\n datetime_format = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n try:\n datetime.strptime(current_time, datetime_format)\n except:\n self.fail('Current datetime format is invalid..')\n try:\n datetime.strptime(current_time_plus_7_days, datetime_format)\n except:\n self.fail('Plus days datetime format is invalid..')\n\n def check_event_type(self, event_type):\n event_task_json = timberlog_event_handler.rest_client.get_request(event_type)\n event_task = json.loads(event_task_json)\n assert event_task['@type'] == event_type\n\n return event_task\n\n def end_with_error_func(self):\n with timberlog.start_task(\"end_with_error_task\") as timberlogContext:\n timberlogContext.info(context={'status': 'OK'})\n timberlogContext.info(metrics={'test_num': 4, 'key': 104})\n raise Exception('test exception')\n\n def get_task_id(self, event_task):\n parent_id = None\n task_id = event_task[consts.TASK_ID]\n if 'parentId' in event_task:\n parent_id = event_task['parentId']\n\n return task_id, parent_id\n","sub_path":"timbermill-python/tests/test_timberlog.py","file_name":"test_timberlog.py","file_ext":"py","file_size_in_byte":5157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"223195808","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import include\nfrom django.conf.urls import url\nfrom django.contrib import admin\n\nurlpatterns = [\n url(r'^providers/', include(\"provider.urls\", namespace='provider')),\n url(r'^members/', include(\"member.urls\", namespace='member')),\n url(r'^query/', include(\"query.urls\", namespace='query')),\n url(r'^auth/', include(\"login.urls\", namespace='login')),\n url(r'^areas/', include(\"area.urls\", namespace='area')),\n url(r'^admin/', admin.site.urls), 
\n]\n","sub_path":"mozio/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"185455722","text":"import json\nimport pickle\nimport threading\nfrom nerd_log_helper import logger\nfrom sqlalchemy import create_engine\nfrom database import MY_SQL_HOST_URL\nfrom sqlalchemy.orm import sessionmaker\nfrom models.applications import Applications\nfrom api.common_helpers.common_constants import TrainingStates\nfrom api.services.application_service import ApplicationService\nfrom archaea.machine_learning.linear_regression.lin_reg_trainer import LinearRegressionTrainer\nfrom archaea.machine_learning.model_persistance.sci_learn_model import SciLearnModelPersistenceHelper\n\n\nclass LinearRegressionService:\n\n def __init__(self):\n pass\n\n @staticmethod\n def compute_linear_regression(application=None, data=None, options=None):\n if application is None:\n raise Exception('Unknown application')\n if data is None:\n raise Exception('No data found to do the computation')\n app_metadata = json.loads(application.app_metadata)\n weights = app_metadata['weights']\n lr_object = pickle.loads(weights)\n lr = SciLearnModelPersistenceHelper.initialize_model_with_state(lr_object)\n lr_trainer = LinearRegressionTrainer(lr)\n prediction = lr_trainer.predict(data)\n return prediction.tolist()\n\n @staticmethod\n def train_linear_regression(application=None, training_data=None, options=None):\n if application is None:\n raise Exception('Unknown application')\n if training_data is None:\n raise Exception('No data found for training')\n app_metadata = json.loads(application.app_metadata)\n weights = app_metadata['weights']\n lr_object = pickle.loads(weights)\n lr = SciLearnModelPersistenceHelper.initialize_model_with_state(lr_object)\n lr_trainer_thread = LinearRegressionTrainWorkerThread(lr=lr,\n data_points=training_data['data_points'],\n expectations=training_data['expectations'],\n application=application)\n training_status = json.loads(application.training_status)\n training_status['status'] = TrainingStates.STARTED\n ApplicationService.update_application(query={\n 'application_id': application.application_id\n }, update_value={\n 'training_status': json.dumps(training_status)\n })\n lr_trainer_thread.start()\n return training_status\n\n\nclass LinearRegressionTrainWorkerThread(threading.Thread):\n\n def __init__(self, lr, data_points, expectations, application):\n super(LinearRegressionTrainWorkerThread, self).__init__()\n self.lr = lr\n self.data_points = data_points\n self.expectations = expectations\n self.application = application\n\n def run(self):\n try:\n lr_trainer = LinearRegressionTrainer(self.lr)\n lr_trainer.train(self.data_points, self.expectations)\n model_object = SciLearnModelPersistenceHelper.get_model_state(self.lr)\n weights = pickle.dumps(model_object)\n training_status = json.loads(self.application.training_status)\n training_status['status'] = TrainingStates.DONE\n training_status['reference'] = 'trained'\n app_metadata = json.loads(self.application.app_metadata)\n app_metadata['weights'] = weights\n some_engine = create_engine(MY_SQL_HOST_URL)\n Session = sessionmaker(bind=some_engine)\n session = Session()\n try:\n session.query(Applications) \\\n .filter_by(\n application_id=self.application.application_id\n ) \\\n .update({'app_metadata': json.dumps(app_metadata),\n 'training_status': json.dumps(training_status)\n })\n session.commit()\n session.close()\n except 
Exception as e:\n session.rollback()\n raise Exception(e.message)\n except Exception as e:\n logger.error('Failure while training linear regression : ' + e.message)\n","sub_path":"api/services/linear_reg_service.py","file_name":"linear_reg_service.py","file_ext":"py","file_size_in_byte":4254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"98271009","text":"from numpy import array, cos, sin, tan, identity\n\ndef Txyz(d):\n T = identity(4)\n T[0:3,3] = d\n\n return T\n\n\ndef Rxyz(phi):\n rx = phi[0]\n ry = phi[1]\n rz = phi[2]\n\n R = identity(4)\n R[0:3,0:3] = array([\n [ cos(ry)*cos(rz), -cos(ry)*sin(rz), sin(ry)],\n [ cos(rx)*sin(rz) + cos(rz)*sin(rx)*sin(ry), cos(rx)*cos(rz) - sin(rx)*sin(ry)*sin(rz), -cos(ry)*sin(rx)],\n [ sin(rx)*sin(rz) - cos(rx)*cos(rz)*sin(ry), cos(rz)*sin(rx) + cos(rx)*sin(ry)*sin(rz), cos(rx)*cos(ry)]\n ])\n return R\n\ndef Rzyx(phi):\n rx = phi[0]\n ry = phi[1]\n rz = phi[2]\n\n R = identity(4)\n R[0:3,0:3] = array([\n [ cos(ry)*cos(rz), cos(rz)*sin(rx)*sin(ry) - cos(rx)*sin(rz), sin(rx)*sin(rz) + cos(rx)*cos(rz)*sin(ry)],\n [ cos(ry)*sin(rz), cos(rx)*cos(rz) + sin(rx)*sin(ry)*sin(rz), cos(rx)*sin(ry)*sin(rz) - cos(rz)*sin(rx)],\n [ -sin(ry), cos(ry)*sin(rx), cos(rx)*cos(ry)]\n ])\n return R\n\ndef InvH(H):\n R = H[0:3,0:3]\n t = H[0:3,3]\n\n invH = identity(4)\n\n invH[0:3,0:3] = R.transpose()\n invH[0:3,3] = -R.transpose().dot(t)\n\n return invH\n\n# Denavit-Hartenberg\ndef DH(theta, d, a, alpha):\n '''\n Deneavit-Hartenberg transformation matrix\n ''' \n return array([\n [ cos(theta),-sin(theta)*cos(alpha), sin(theta)*sin(alpha), a*cos(theta)],\n [ sin(theta), cos(theta)*cos(alpha),-cos(theta)*sin(alpha), a*sin(theta)],\n [ 0 , sin(alpha) , cos(alpha) , d ],\n [ 0 , 0 , 0 , 1 ]\n ])\n","sub_path":"hmi/src/math3d.py","file_name":"math3d.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"404676733","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport load_data as ld\nimport convlstm as md\nimport evaluation as ev\nimport copy\n\ndevice = torch.device('cuda')\n\ndef feed_model_data(model, grid_data_seq):\n _, hidden_states = model(grid_data_seq)\n\n return model, hidden_states\n\ndef generate_forecasts(model, hidden_states, init_grid_data, seq_len=48):\n prev_grid = init_grid_data\n meo_forecasts = []\n\n for i in range(seq_len):\n prev_grid, hidden_states = model(prev_grid[:,:,:6], hidden_states)\n meo_forecasts.append(prev_grid)\n\n return torch.cat(meo_forecasts, 1)\n\ndef maemis_loss(y_pred, y_true):\n pho = 0.05\n loss0 = torch.abs(y_pred[:,:,0] - y_true)\n loss1 = torch.max(y_pred[:,:,2]-y_pred[:,:,1],torch.tensor([0.]).cuda())\n loss2 = torch.max(y_pred[:,:,1]-y_true,torch.tensor([0.]).cuda())*2/pho\n loss3 = torch.max(y_true-y_pred[:,:,2],torch.tensor([0.]).cuda())*2/pho\n loss = loss0+loss1+loss2+loss3\n loss[loss != loss] = 0\n return loss.mean()\n\ndef seq_preprocessing(grid_seqs):\n \"\"\"\n\n :param grid_seqs: list of (m, Tx, n_c, n_h, n_w)\n :param aqi_seqs: list of (m, Tx, n_c)\n :return:\n \"\"\"\n input_seqs = []\n target_meo_seqs = []\n avg_grids = []\n std_grids = []\n\n for data in grid_seqs:\n m, Tx, _, _, _ = data.shape\n avg = np.reshape(np.average(data, axis=(1, 3, 4)), (m, 1, 6, 1, 1))\n std = np.reshape(np.std(data, axis=(1, 3, 4)), (m, 1, 6, 1, 1))\n avg_grids.append(avg)\n std_grids.append(std)\n\n for i in range(len(grid_seqs)):\n 
grid_seq = grid_seqs[i]\n grid_seq = (grid_seq - avg_grids[i]) / std_grids[i]\n\n input_seq = grid_seq[:, :24, :, :, :] # Remove the last from the input seq\n target_meo = grid_seq[:, 24:, :, :, :]\n\n input_seqs.append(input_seq)\n target_meo_seqs.append(target_meo)\n\n assert len(input_seqs) == len(target_meo_seqs)\n return input_seqs, target_meo_seqs, avg_grids[0], std_grids[0]\n\n\ndef train(\n model,\n input_seqs,\n target_meo_seqs,\n dev_input_seqs,\n dev_target_meo_seqs,\n snapshots,\n iterations=100,\n lr=0.01,\n clipping_norm=1e-5):\n loss_function = nn.L1Loss()\n optimizer = optim.Adam(model.parameters(), lr)\n\n for epoch in range(iterations):\n losses = []\n for i in range(len(input_seqs)):\n input_seq = torch.tensor(\n input_seqs[i][:,:-1],\n dtype=torch.float32,\n device=device,\n )\n\n start_point = torch.tensor(\n input_seqs[i][:,-1:],\n dtype=torch.float32,\n device=device,\n )\n\n target_meo = torch.tensor(\n target_meo_seqs[i],\n dtype=torch.float32,\n device=device,\n )\n\n model.zero_grad()\n model, hidden_states = feed_model_data(model, input_seq)\n meo_forecasts = generate_forecasts(model, hidden_states, start_point)\n loss_meo = loss_function(meo_forecasts[:,:,:5], target_meo[:,:,:5])/5\n loss_pm = maemis_loss(meo_forecasts[:,:,5:], target_meo[:,:,-1])\n loss = loss_meo+loss_pm\n loss.backward()\n\n losses.append(loss.item())\n nn.utils.clip_grad_norm_(model.parameters(), clipping_norm)\n optimizer.step()\n\n loss = np.mean(losses)\n\n dev_loss = ev.compute_dev_set_loss(model, dev_input_seqs, dev_target_meo_seqs)\n snapshots.append((loss, dev_loss))\n #torch.save(model.state_dict(), 'models/3x3-2-256-2loss_{}.md'.format(dev_loss))\n\n return model, loss, dev_loss\n","sub_path":"convLSTM(PM2.5)/maemis_model/convlstm_training.py","file_name":"convlstm_training.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"8452784","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport copy as cp\nimport time\nimport h5py\nimport sys\nimport corner\nimport copy\nimport scipy\nimport astropy.constants as const\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy.special import comb\nfrom astropy import units as u\nfrom zreion import apply_zreion_fast\nfrom astropy.cosmology import Planck15\n\nimport analysis\nimport signals\nimport estimators\nimport fitting\nimport models\nimport utils\nimport survey\n\nplt.style.use('seaborn-colorblind')\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\n\ncolors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n# Simulations\n\nprint('loading simulations')\n\nwhich_box = 'little'\nprint('running analysis on', which_box, 'box')\n\nif which_box == 'little':\n rez = 512\n box = h5py.File('L80_halos_z=6.0155.hdf5', 'r')\n print(box.keys())\n\n redshift = 6.0155\n masses = np.array(box[('mass')])\n pos = np.array(box[('pos')])\n density = np.array(box[('rho')])\n x, y, z = pos.T\n\n runs = 3\n n_bins = 20\n\n box_size = 80 # in Mpc\n r = np.linspace(0, box_size, rez)\n r_vec = np.stack((r, r, r))\n\nif which_box == 'big':\n# rez = 1024\n# box = h5py.File('halos.z8.hdf5', 'r')\n# print(box.keys())\n#\n# density = np.fromfile('sims/rho.z=07.9589_cic_1024', dtype=np.float64).reshape(rez, rez, rez, order='F')\n#\n# #density.max()\n#\n redshift = 7.9589\n# masses = np.array(box[('m')])\n# x = np.array(box[('x')])\n# y = np.array(box[('y')])\n# z = np.array(box[('z')])\n#\n# runs = 3\n# n_bins = 
20\n#\n# box_size = 160 # in Mpc\n# r = np.linspace(0, box_size, rez)\n# r_vec = np.stack((r, r, r))\n#\n# mass_voxels, mass_edges = np.histogramdd([x,y,z], bins=rez,\n# weights=masses)\n\nprint('generating underlying matter density spectrum')\n#print('loading underlying matter density spectrum')\n\ndelta = utils.overdensity(density)\nk, P_m = analysis.calc_pspec(r_vec, [delta], n_bins=n_bins, bin_scale='log')\nnp.savez(f'spectra/matter_pspec_z{redshift}', k=k, P_m=P_m)\n\n# matter_pspec = np.load('/home/mcbrie2/projects/def-acliu/mcbrie2/deprism/spectra/matter_pspec_6.0155.npz')\nmatter_pspec = np.load(f'spectra/matter_pspec_z{redshift}.npz')\nk = matter_pspec['k']\nP_m = matter_pspec['P_m']\n\nprint('yay! finished the matter stuff')\n\n### Datasets\n\n# pspecs_sf = np.load('spectra/pspecs_sf_z6.0155.npy')\n# pspecs_pl = np.load('pspecs_pl.npz')\n# pspecs_bt = np.load('pspecs_bt.npz')\n# pspecs_bt.files\n#\nspectra_sf = np.load(f'spectra_all_int/spectra_sf_z{redshift}.npy')\nspectra_pl = np.load(f'spectra_all_int/spectra_pl_z{redshift}.npy')\nspectra_bt = np.load(f'spectra_all_int/spectra_bt_z{redshift}.npy')\n\n# spectra_sf = np.load('/home/mcbrie2/projects/def-acliu/mcbrie2/deprism/spectra/pspecs_sf_z6.0155.npy')\n# spectra_pl = np.load('/home/mcbrie2/projects/def-acliu/mcbrie2/deprism/spectra/pspecs_pl_z6.0155.npy')\n# spectra_bt = np.load('/home/mcbrie2/projects/def-acliu/mcbrie2/deprism/spectra/pspecs_bt_z6.0155.npy')\n\n# Fitting\np_names = np.asarray(['b_i','b_j', 'b_k', 'P_m'])\nk_indices = [6]\n\nfrac_op = .005\nfrac_con = .01\nfrac_pess = .10\n\nnoise = np.asarray([.001, .005, .01, .05, .1, .15])\n\n# print('superfake temperature analysis')\n#\n# ### Superfake data and superfake noise levels\n# biases_sf = utils.extract_bias(k_indices, spectra_sf, P_m)\n# p_vals_sf = np.asarray([*biases_sf, P_m], dtype=object)\n#\n# params_sf = dict(zip(p_names, p_vals_sf))\n# ndim = utils.get_params(params_sf, k_indices).size\n# model = models.ScalarBias_crossonly(k=spectra_sf[0], params=params_sf)\n# N_modes_small = survey.calc_N_modes(k, 80**3 * u.Mpc**3, align='left')\n#\n# for i, n in enumerate(noise):\n# t0 = time.time()\n# print('Now on noise level',n,'%')\n# nsteps = int(1e6)\n# if n > .1:\n# nsteps = int(1e7)\n#\n# data_nl, Beane_nl, LSE_nl, MCMC_nl = analysis.keep_P_21(k_indices, spectra_sf, params_sf, n, model,\n# N_modes=N_modes_small, noiseless=True, nsteps=nsteps,\n# backend_filename=f'noise{n}_sf_nl_z{redshift}_int.h5')\n# data, Beane, LSE, MCMC = analysis.keep_P_21(k_indices, spectra_sf, params_sf, n, model,\n# N_modes=N_modes_small, noiseless=False, nsteps=nsteps,\n# backend_filename=f'noise{n}_sf_z{redshift}_int.h5')\n#\n#\n# np.savez(f'results_all_int/sf_fits/noise{n}_sf_nl_z{redshift}_int', data=data_nl, Beane=Beane_nl, LSE=LSE_nl,\n# samples=MCMC_nl[0], logp=MCMC_nl[1])\n# np.savez(f'results_all_int/sf_fits/noise{n}_sf_z{redshift}_int', data=data, Beane=Beane, LSE=LSE,\n# samples=MCMC[0], logp=MCMC[1])\n#\n# tf = time.time()\n# print(f'run {i} saved to disk')\n# print('time to complete superfake analysis run {i} is:', (tf - t0) / 60 / 60, 'hours')\n\n# ### Simulated power law data and fractional noise error\nprint('power law analysis')\n\nbiases_pl = utils.extract_bias(k_indices, spectra_pl, P_m)\np_vals_pl = np.asarray([*biases_pl, P_m], dtype=object)\n\nparams_pl = dict(zip(p_names, p_vals_pl))\nndim = utils.get_params(params_pl, k_indices).size\nmodel = models.ScalarBias_crossonly(k=spectra_pl[0], params=params_pl)\nN_modes_small = survey.calc_N_modes(k, 80**3 * u.Mpc**3, 
align='left')\n\nfor i, n in enumerate(noise):\n t0 = time.time()\n print('Now on noise level',n,'%')\n nsteps = int(1e6)\n if n > .1:\n nsteps = int(1e7)\n\n data_nl, Beane_nl, LSE_nl, MCMC_nl = analysis.keep_P_21(k_indices, spectra_pl, params_pl, n, model,\n N_modes=N_modes_small, noiseless=True, nsteps=nsteps,\n backend_filename=f'noise{n}_pl_nl_z{redshift}_int.h5')\n data, Beane, LSE, MCMC = analysis.keep_P_21(k_indices, spectra_pl, params_pl, n, model,\n N_modes=N_modes_small, noiseless=False, nsteps=nsteps,\n backend_filename=f'noise{n}_pl_z{redshift}_int.h5')\n\n\n np.savez(f'results_all_int/pl_fits/noise{n}_pl_nl_z{redshift}_int', data=data_nl, Beane=Beane_nl, LSE=LSE_nl,\n samples=MCMC_nl[0], logp=MCMC_nl[1])\n np.savez(f'results_all_int/pl_fits/noise{n}_pl_z{redshift}_int', data=data, Beane=Beane, LSE=LSE,\n samples=MCMC[0], logp=MCMC[1])\n\n\n tf = time.time()\n print(f'run {i} saved to disk')\n print('time to complete power law run {i} is:', (tf - t0) / 60, 'minutes')\n\n# ### Simulated brightness temperature data and fractional noise error\n# print('brightness temperature analysis')\n#\n# biases_bt = utils.extract_bias(k_indices, spectra_bt, P_m)\n# p_vals_bt = np.asarray([*biases_bt, P_m], dtype=object)\n#\n# params_bt = dict(zip(p_names, p_vals_bt))\n# ndim = utils.get_params(params_bt, k_indices).size\n# model = models.ScalarBias_crossonly(k=spectra_bt[0], params=params_bt)\n# N_modes_small = survey.calc_N_modes(k, 80**3 * u.Mpc**3, align='left')\n#\n# for i, n in enumerate(noise):\n# t0 = time.time()\n# print('Now on noise level',n,'%')\n# nsteps = int(1e6)\n# if n > .1:\n# nsteps = int(1e7)\n#\n# data_nl, Beane_nl, LSE_nl, MCMC_nl = analysis.keep_P_21(k_indices, spectra_bt, params_bt, n, model,\n# N_modes=N_modes_small, noiseless=True, nsteps=nsteps,\n# backend_filename=f'noise{n}_bt_nl_z{redshift}_int.h5')\n# data, Beane, LSE, MCMC = analysis.keep_P_21(k_indices, spectra_bt, params_bt, n, model,\n# N_modes=N_modes_small, noiseless=False, nsteps=nsteps,\n# backend_filename=f'noise{n}_bt_z{redshift}_int.h5')\n#\n#\n# np.savez(f'results_all_int/bt_fits/noise{n}_bt_nl_z{redshift}_int', data=data_nl, Beane=Beane_nl, LSE=LSE_nl,\n# samples=MCMC_nl[0], logp=MCMC_nl[1])\n# np.savez(f'results_all_int/bt_fits/noise{n}_bt_z{redshift}_int', data=data, Beane=Beane, LSE=LSE,\n# samples=MCMC[0], logp=MCMC[1])\n#\n#\n# tf = time.time()\n# print(f'run {i} saved to disk')\n# print(f'time to complete brightness temperature run {i} is:', (tf - t0) / 60, 'minutes')\n\n# # ### Fisher analysis\n","sub_path":"run_noises_pl.py","file_name":"run_noises_pl.py","file_ext":"py","file_size_in_byte":8491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"140991052","text":"#!/usr/bin/python3\n# palindrome_pairs.py\n# Given a list of words, return the list of all ordered pairs of unique indices\n# such that the concatenation of the strings at those indices is a palindrome.\n# For example, given the list of words ['ab', 'ba', 'x', 'yx'], you should\n# return the list [(0, 1), (1, 0), (2, 3)] since 'ab' + 'ba' = 'abba',\n# 'ba' + 'ab' = 'baab', and 'x' + 'yx' are palindromes.\n# DCP2.2;\n\n\ndef palindrome_pairs_brute_force(words):\n def is_palindrome(s):\n return s == s[::-1]\n\n palindrome_pairs = []\n for i in range(len(words)):\n for j in range(len(words)):\n if i != j and is_palindrome(words[i] + words[j]):\n palindrome_pairs.append((i, j))\n\n return palindrome_pairs\n\n\ndef palindrome_pairs_dictionary(words):\n def is_palindrome(s):\n lo = 0\n 
hi = len(s) - 1\n while lo < hi:\n if s[lo] != s[hi]:\n return False\n lo += 1\n hi -= 1\n return True\n\n word_indices = {}\n for i, w in enumerate(words):\n if w in word_indices:\n word_indices[w].append(i)\n else:\n word_indices[w] = [i]\n\n # Let w1 and w2 be words with len(w1) >= len(w2). There are two possible ways\n # in which w1 + w2 can be a palindrome:\n # (1) if w1 = x + p and w2 = reversed(x), where x is an arbitrary string and\n # p is a palindrome, then w1 + w2 = x + p + reversed(x) is a palindrome;\n # (2) if w1 = p + x and w2 = reversed(x), where x is an arbitrary string and\n # p is a palindrome, then w2 + w1 = reversed(x) + p + x is a palindrome.\n palindrome_pairs = set()\n for i, w in enumerate(words):\n for k in range(len(w) + 1):\n prefix = w[:k]\n suffix = w[k:]\n\n reversed_prefix = prefix[::-1]\n reversed_suffix = suffix[::-1]\n\n # (1) x = prefix, p = suffix\n if is_palindrome(suffix):\n for j in word_indices.get(reversed_prefix, []):\n if i != j:\n palindrome_pairs.add((i, j))\n # (2) x = suffix, p = prefix\n if is_palindrome(prefix):\n for j in word_indices.get(reversed_suffix, []):\n if i != j and (j, i) not in palindrome_pairs:\n palindrome_pairs.add((j, i))\n\n return list(palindrome_pairs)\n\n\ndef main():\n tests = (\n (['ab', 'ba', 'x', 'yx'], [(0, 1), (1, 0), (2, 3)]),\n (['ab', 'ba', 'cba'], [(0, 1), (0, 2), (1, 0)]),\n (['ab', 'ba', 'ba'], [(0, 1), (0, 2), (1, 0), (2, 0)]),\n (['abc', 'ba', 'ab', 'cba'], [(0, 1), (0, 3), (1, 2), (2, 1), (2, 3),\n (3, 0)]),\n (['abcb', 'a'], [(0, 1)]),\n (['aba', ''], [(0, 1), (1, 0)]),\n (['a', 'b', 'a'], [(0, 2), (2, 0)]),\n (['abcded', 'cba'], [(0, 1)]),\n )\n sols = (\n palindrome_pairs_brute_force,\n palindrome_pairs_dictionary,\n )\n\n correct_counts = {sol: 0 for sol in sols}\n for test_index, (words, want) in enumerate(tests):\n print('Test {}:'.format(test_index + 1))\n print('words = {}'.format(words))\n print('want = {}'.format(want))\n\n for sol in sols:\n got = sorted(sol(words))\n if got == want:\n print('{} passes'.format(sol.__name__))\n correct_counts[sol] += 1\n else:\n print('{} fails; got = {}'.format(sol.__name__, got))\n\n print()\n\n print('Summary:')\n for sol in sols:\n print('{}: {} of {} correct'.format(sol.__name__, correct_counts[sol],\n len(tests)))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"programming/python/interview_questions/strings/palindrome_pairs.py","file_name":"palindrome_pairs.py","file_ext":"py","file_size_in_byte":3732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"206528551","text":"import pygame\nimport Functions as f\nimport random\n\nclass PopulationManager:\n\n def __init__(self, worldMap):\n self.settlements = []\n self.worldMap = worldMap\n self.tileMap = []\n self.generateTileMap()\n self.shouldUpdateTileMap = False\n self.placePositions = self.findPlaceablePositions()\n\n def tick(self):\n if self.shouldUpdateTileMap:\n self.updateTileMap()\n for i in self.settlements:\n i.tick(self.tileMap)\n self.updateTileMap()\n\n def addSettlement(self, position = [-1, -1], population = 1, name = 'NONE'):\n settlementLocation = position\n if settlementLocation == [-1, -1]:\n settlementLocation = self.placePositions.pop(random.randint(0, len(self.placePositions)-1))\n\n self.settlements.append(Settlement(settlementLocation, name, population))\n self.tileMap[settlementLocation[0]][settlementLocation[1]] = name\n\n def findPlaceablePositions(self):\n placeablePositions = []\n for tempX in 
range(self.worldMap.getSize()):\n for tempY in range(self.worldMap.getSize()):\n if self.tileMap[tempX][tempY] == '':\n placeablePositions.append([tempX, tempY])\n return placeablePositions\n\n def generateTileMap(self):\n self.tileMap = [['X' for i in range(self.worldMap.getSize())] for j in range(self.worldMap.getSize())]\n for posX in range(self.worldMap.getSize()):\n for posY in range(self.worldMap.getSize()):\n if self.worldMap.foodMap[posX][posY] >= 0.7:\n self.tileMap[posX][posY] = ''\n\n\n def updateTileMap(self):\n for settlement in self.settlements:\n for districtLayer in settlement.districtMap:\n for district in districtLayer:\n self.tileMap[district.position[0]][district.position[1]] = settlement.settlementName\n self.shouldUpdateTileMap = False\n\nclass Settlement:\n\n def __init__(self, startPosition, settlementName = 'NONE', population = 1):\n self.settlementName = settlementName\n self.startPosition = startPosition\n self.districtMap = [[District(self.startPosition, 'residential')]]\n\n def tick(self, tileMap):\n print(len(self.districtMap))\n x = []\n for districtLayer in self.districtMap:\n for district in districtLayer:\n if not district.exp:\n continue\n for perm in [[-1, 0],[1, 0],[0, -1],[0, 1]]:\n if tileMap[district.position[0] + perm[0]][district.position[1] + perm[1]] == '':\n x.append(District([district.position[0] + perm[0], district.position[1] + perm[1]],'residential'))\n district.exp = False\n self.districtMap.append(x)\n\nclass District:\n\n def __init__(self, position, type):\n self.position = position\n self.type = type\n self.exp = True\n\n\n\n\n","sub_path":"SimpleSim/PopulationManager.py","file_name":"PopulationManager.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"607738848","text":"import os\nimport io\n\nimport pytest\n\nfrom release import cibuild\n\n\ndef test_buildenviron_live():\n be = cibuild.BuildEnviron.from_env()\n assert be.release_dir\n\n\ndef test_buildenviron_common():\n be = cibuild.BuildEnviron(\n system = \"Linux\",\n root_dir = \"/foo\",\n\n travis_tag = \"v0.0.1\",\n travis_branch = \"v0.x\",\n )\n assert be.release_dir == os.path.join(be.root_dir, \"release\")\n assert be.dist_dir == os.path.join(be.root_dir, \"release\", \"dist\")\n assert be.build_dir == os.path.join(be.root_dir, \"release\", \"build\")\n assert be.is_pull_request is False\n assert not be.has_docker_creds\n\n cs = io.StringIO()\n be.dump_info(cs)\n assert cs.getvalue()\n\n be = cibuild.BuildEnviron(\n system = \"Unknown\",\n root_dir = \"/foo\",\n )\n with pytest.raises(cibuild.BuildError):\n be.version\n with pytest.raises(cibuild.BuildError):\n be.platform_tag\n\n\ndef test_buildenviron_pr():\n # Simulates a PR. 
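As a minimal, self-contained sketch of the placeability scan used by PopulationManager above (generateTileMap marks tiles open when foodMap >= 0.7, then findPlaceablePositions collects the open coordinates), with all names and sizes here being hypothetical:

    size = 4
    food_map = [[0.8] * size for _ in range(size)]

    # 'X' marks unbuildable tiles, '' marks open ones, as in generateTileMap
    tile_map = [['' if food_map[x][y] >= 0.7 else 'X' for y in range(size)]
                for x in range(size)]

    # findPlaceablePositions then gathers the coordinates of open tiles
    placeable = [[x, y] for x in range(size) for y in range(size)
                 if tile_map[x][y] == '']
    assert len(placeable) == size * size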
We build everything, but don't have access to secret\n # credential env variables.\n be = cibuild.BuildEnviron(\n travis_tag = \"\",\n travis_branch = \"master\",\n travis_pull_request = \"true\",\n\n should_build_wheel = True,\n should_build_pyinstaller = True,\n should_build_docker = True,\n )\n assert be.is_pull_request\n\n # Mini test for appveyor\n be = cibuild.BuildEnviron(\n appveyor_pull_request_number = \"xxxx\",\n )\n assert be.is_pull_request\n assert not be.is_prod_release\n\n\ndef test_buildenviron_commit():\n # Simulates an ordinary commit on the master branch.\n be = cibuild.BuildEnviron(\n travis_tag = \"\",\n travis_branch = \"master\",\n travis_pull_request = \"false\",\n\n should_build_wheel = True,\n should_build_pyinstaller = True,\n should_build_docker = True,\n docker_username = \"foo\",\n docker_password = \"bar\",\n )\n assert be.docker_tag == \"mitmproxy/mitmproxy:dev\"\n assert be.should_upload_docker\n assert not be.should_upload_pypi\n assert be.should_upload_docker\n assert not be.is_prod_release\n\n\ndef test_buildenviron_rleasetag():\n # Simulates a tagged release on a release branch.\n be = cibuild.BuildEnviron(\n system = \"Linux\",\n root_dir = \"/foo\",\n\n travis_tag = \"v0.0.1\",\n travis_branch = \"v0.x\",\n\n should_build_wheel = True,\n should_build_docker = True,\n should_build_pyinstaller = True,\n has_twine_creds = True,\n docker_username = \"foo\",\n docker_password = \"bar\",\n )\n assert be.tag == \"v0.0.1\"\n assert be.branch == \"v0.x\"\n assert be.version == \"0.0.1\"\n assert be.upload_dir == \"0.0.1\"\n assert be.docker_tag == \"mitmproxy/mitmproxy:0.0.1\"\n assert be.should_upload_pypi\n assert be.should_upload_docker\n assert be.is_prod_release\n\n\ndef test_buildenviron_branch():\n # Simulates a development branch on the main repo\n be = cibuild.BuildEnviron(\n system = \"Linux\",\n root_dir = \"/foo\",\n\n travis_tag = \"\",\n travis_branch = \"mybranch\",\n\n should_build_wheel = True,\n should_build_docker = True,\n should_build_pyinstaller = True,\n has_twine_creds = True,\n docker_username = \"foo\",\n docker_password = \"bar\",\n )\n assert be.tag == \"\"\n assert be.branch == \"mybranch\"\n assert be.version == \"mybranch\"\n assert be.upload_dir == \"branches/mybranch\"\n assert not be.should_upload_pypi\n assert not be.should_upload_docker\n\n\ndef test_buildenviron_osx(tmpdir):\n be = cibuild.BuildEnviron(\n system = \"Darwin\",\n root_dir = \"/foo\",\n\n travis_tag = \"v0.0.1\",\n travis_branch = \"v0.x\",\n )\n assert be.platform_tag == \"osx\"\n assert be.bdists == {\n \"mitmproxy\": [\"mitmproxy\", \"mitmdump\", \"mitmweb\"],\n \"pathod\": [\"pathoc\", \"pathod\"],\n }\n assert be.archive_name(\"mitmproxy\") == \"mitmproxy-0.0.1-osx.tar.gz\"\n\n a = be.archive(os.path.join(tmpdir, \"arch\"))\n assert a\n a.close()\n\n\ndef test_buildenviron_windows(tmpdir):\n be = cibuild.BuildEnviron(\n system = \"Windows\",\n root_dir = \"/foo\",\n\n travis_tag = \"v0.0.1\",\n travis_branch = \"v0.x\",\n )\n assert be.platform_tag == \"windows\"\n assert be.bdists == {\n \"mitmproxy\": [\"mitmdump\", \"mitmweb\"],\n \"pathod\": [\"pathoc\", \"pathod\"],\n }\n assert be.archive_name(\"mitmproxy\") == \"mitmproxy-0.0.1-windows.zip\"\n\n a = be.archive(os.path.join(tmpdir, \"arch\"))\n assert a\n a.close()","sub_path":"test/release/test_cibuild.py","file_name":"test_cibuild.py","file_ext":"py","file_size_in_byte":4543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
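The BuildEnviron tests above encode a small decision table: pull requests never upload, untagged commits on master push only a dev Docker tag, and a v-prefixed tag on a release branch is a production release. A rough sketch of that gating logic, hedged as an illustration of what the fixtures assert rather than the actual cibuild implementation:

    def is_prod_release(tag: str, branch: str, is_pull_request: bool) -> bool:
        # PRs never release, regardless of tag or branch
        if is_pull_request:
            return False
        # a tag like v0.0.1 on a branch like v0.x marks a production release
        return tag.startswith('v') and branch.endswith('.x')

    assert is_prod_release('v0.0.1', 'v0.x', False)
    assert not is_prod_release('', 'master', False)
    assert not is_prod_release('v0.0.1', 'v0.x', True)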
+{"seq_id":"151655350","text":"#! /usr/bin/env python\n\nimport argparse\nimport os.path\nimport sys\nimport logging.handlers\nfrom model_training import initPaths, ShapeTrainer\n\n# =======================================\ndef printOptions(args, title):\n logger = logging.getLogger()\n logger.info(\"\".center(len(title)+4,'-'))\n logger.info(\"| %s |\" % title)\n logger.info(\"\".center(len(title)+4,'-'))\n logger.info(\"\")\n logger.info(\"Selected options:\")\n logger.info(\" - Model name: \" + args.model_name)\n if args.no_params:\n logger.info(\" - Procedure: No parameter selection\")\n elif args.reduced:\n logger.info(\" - Procedure: Reduced cross-validation\")\n elif args.full:\n logger.info(\" - Procedure: Full cross-validation\")\n if args.gammas:\n logger.info(\" - Gamma values: \" + args.gammas)\n else:\n logger.info(\" - Gamma values: Default \")\n i=1\n for d in args.dataset:\n logger.info(\" - Dataset %i: %s\" % (i, d))\n i=i+1\n if args.verbose:\n logger.info(\" - Verbose mode: On\")\n else:\n logger.info(\" - Verbose mode: Off\")\n if args.log:\n logger.info(\" - Log file: \" + args.log)\n else:\n logger.info(\" - Log file: Off\")\n if args.temp:\n logger.info(\" - Temporary files: Leave\")\n else:\n logger.info(\" - Temporary files: Delete\")\n logger.info(\"\") \n\n \n# =======================================\ndef parseArgs(title):\n parser = argparse.ArgumentParser(description=title)\n parser.add_argument('model_name',\n help='name of the model')\n parser.add_argument('dataset', nargs='+', \n help='path to a dataset')\n group1 = parser.add_mutually_exclusive_group(required=True)\n group1.add_argument('--no-params', action='store_true', default=False,\n help='run the training procedure without parameter selection; only one dataset is required and gamma can be specified using --gamma')\n group1.add_argument('--reduced', action='store_true', default=False,\n help='run the reduced cross-validation procedure; two datasets are required and the second one is always used for testing')\n group1.add_argument('--full', action='store_true', default=False,\n help='run the full cross-validation procedure; at least two datasets are required')\n parser.add_argument('--gammas', metavar='values', action='store', default=\"\",\n help='comma separated list of gamma values')\n group3 = parser.add_argument_group('debugging')\n group3.add_argument('--temp', action='store_true', default=False,\n help='do not delete the temporary files')\n group3.add_argument('--verbose', action='store_true', default=False,\n help='be verbose')\n group3.add_argument('--log', action='store_true', default=False,\n help='create a log file')\n args = parser.parse_args()\n\n # Check arguments\n if args.no_params and len(args.dataset)!=1:\n parser.error(\"Only one dataset can be provided if the --no-params option is used.\")\n elif args.reduced and len(args.dataset)!=2:\n parser.error(\"Two datasets must be provided if the --reduced option is used.\")\n elif args.full and len(args.dataset)<2:\n parser.error(\"At least two datasets are required for the full training procedure.\")\n\n if args.log:\n args.log=os.path.basename(sys.argv[0]).split('.')[0]+\".log\"\n\n if len(args.gammas)>0:\n args.gammas = args.gammas.split(',')\n else:\n args.gammas=[]\n\n if args.model_name.find('/')>=0 or args.model_name.find('.')>=0:\n parser.error(\"Incorrect model name.\")\n\n return args\n\n\n# =======================================\ndef configureLog(args):\n # Set up a specific logger with our desired output level\n logger = 
logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n # Add the log message handlers to the logger\n if args.log:\n rollOver=False\n if os.path.exists(args.log):\n rollOver=True\n fileHandler = logging.handlers.RotatingFileHandler(args.log, backupCount=5)\n if rollOver:\n fileHandler.doRollover()\n fileHandler.setLevel(logging.DEBUG)\n logger.addHandler(fileHandler)\n streamHandler = logging.StreamHandler(sys.stdout) \n if args.verbose:\n streamHandler.setLevel(logging.DEBUG)\n else:\n streamHandler.setLevel(logging.INFO)\n logger.addHandler(streamHandler)\n\n\n# =======================================\ndef main():\n\n # Title\n propertyName=\"shape\"\n title = \"Training '%s' property models for the Categorical Subarchitecture\" % propertyName\n\n # Args\n args=parseArgs(title)\n configureLog(args)\n printOptions(args, title)\n \n # Initialize paths\n initPaths(args.model_name, args.temp)\n \n # Run training\n logging.getLogger().info(\"Performing training:\")\n shapeTrainer = ShapeTrainer(args.gammas)\n shapeTrainer.train(args.full, args.reduced, args.no_params, args.dataset)\n \n\n# =======================================\nif __name__ == '__main__':\n main()","sub_path":"subarchitectures/categorical.sa/branches/avs-iros11/src/python/tools/train_shape.py","file_name":"train_shape.py","file_ext":"py","file_size_in_byte":5137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"116929917","text":"from collections import deque\n\n\nclass TreeNode(object):\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\n\nclass Solution:\n \"\"\"\n @param root: root of the given tree\n @return: whether it is a mirror of itself\n \"\"\"\n\n def isSymmetric(self, root):\n # Write your code here\n if not root:\n return True\n q = deque()\n temp = deque()\n q.append(root.left)\n q.append(root.right)\n\n while len(q) > 0:\n while len(q) > 0:\n left = q.popleft()\n right = q.pop()\n if left and right:\n if left.val != right.val:\n return False\n temp.appendleft(left.right)\n temp.appendleft(left.left)\n temp.append(right.left)\n temp.append(right.right)\n elif not left and not right:\n continue\n else:\n return False\n q = temp\n temp = deque()\n return True\n\n\nif __name__ == '__main__':\n root = TreeNode(5)\n root.left = TreeNode(3)\n root.left.left = TreeNode(1)\n root.left.right = TreeNode(4)\n root.right = TreeNode(8)\n root.right.left = TreeNode(6)\n solution = Solution()\n print(solution.isSymmetric(root))\n","sub_path":"Lintcode1360.py","file_name":"Lintcode1360.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"521375380","text":"import time # import the time library for the sleep function\r\nimport brickpi3 # import the BrickPi3 drivers\r\n\r\nBP = brickpi3.BrickPi3() # Create an instance of the BrickPi3 class. 
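configureLog in train_shape.py above uses a RotatingFileHandler trick worth noting: it records whether the log file exists before constructing the handler (construction opens, and thus creates, the file), then calls doRollover() so each run starts on a fresh log while keeping up to five old ones. The same pattern in isolation, with a hypothetical path:

    import logging
    import logging.handlers
    import os

    log_path = 'train_shape.log'    # hypothetical path

    # Check existence BEFORE building the handler, since building it
    # creates the file; then rotate any pre-existing file away.
    roll_over = os.path.exists(log_path)
    handler = logging.handlers.RotatingFileHandler(log_path, backupCount=5)
    if roll_over:
        handler.doRollover()

    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)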
BP will be the BrickPi3 object.\r\n\r\ntry:\r\n while True:\r\n print(\"Battery voltage: %6.3f\" % (BP.get_voltage_battery())) # read and display the current voltages\r\n \r\n time.sleep(0.02)\r\n\r\nexcept KeyboardInterrupt: # except the program gets interrupted by Ctrl+C on the keyboard.\r\n BP.reset_all()","sub_path":"Projects/Testing/readVoltages.py","file_name":"readVoltages.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"144648374","text":"#!/usr/bin/env python\nimport xml.etree.ElementTree as ET\nimport pprint\nimport re\nimport codecs\nimport json\nfrom pymongo import MongoClient\nimport os\n\n\n\n# source : http://napitupulu-jon.appspot.com/posts/wrangling-openstreetmap.html\nlower = re.compile(r'^([a-z]|_)*$')\nlower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')\nproblemchars = re.compile(r'[=\\+/&<>;\\'\"\\?%#$@\\,\\. \\t\\r\\n]')\naddresschars = re.compile(r'addr:(\\w+)')\nCREATED = [ \"version\", \"changeset\", \"timestamp\", \"user\", \"uid\"]\nosm_file = 'updatedAmst2.osm'\n\ndef shape_element(element):\n #node = defaultdict(set)\n node = {}\n if element.tag == \"node\" or element.tag == \"way\" :\n #create the dictionary based on exaclty the value in element attribute.\n node = {'created':{}, 'type':element.tag}\n for k in element.attrib:\n try:\n v = element.attrib[k]\n except KeyError:\n continue\n if k == 'lat' or k == 'lon':\n continue\n if k in CREATED:\n node['created'][k] = v\n else:\n node[k] = v\n try:\n node['pos']=[float(element.attrib['lat']),float(element.attrib['lon'])]\n except KeyError:\n pass\n \n if 'address' not in node.keys():\n node['address'] = {}\n #Iterate the content of the tag\n for stag in element.iter('tag'):\n #Init the dictionry\n\n k = stag.attrib['k']\n v = stag.attrib['v']\n #Checking if indeed prefix with 'addr' and no ':' afterwards\n if k.startswith('addr:'):\n if len(k.split(':')) == 2:\n content = addresschars.search(k)\n if content:\n node['address'][content.group(1)] = v\n else:\n node[k]=v\n if not node['address']:\n node.pop('address',None)\n #Special case when the tag == way, scrap all the nd key\n if element.tag == \"way\":\n node['node_refs'] = []\n for nd in element.iter('nd'):\n node['node_refs'].append(nd.attrib['ref'])\n# if 'address' in node.keys():\n# pprint.pprint(node['address'])\n return node\n else:\n return None\n\n\ndef process_map(file_in, pretty = False):\n \"\"\"\n Process the osm file to json file to be prepared for input file to monggo\n \"\"\"\n file_out = \"{0}.json\".format(file_in)\n data = []\n with codecs.open(file_out, \"w\") as fo:\n for _, element in ET.iterparse(file_in):\n el = shape_element(element)\n if el:\n data.append(el)\n if pretty:\n fo.write(json.dumps(el, indent=2)+\"\\n\")\n else:\n fo.write(json.dumps(el) + \"\\n\")\n return data\n\ndata = process_map(osm_file)\npprint.pprint(data[10])\n\n\ndb_name = 'AmsOSM'\n# Connect to Mongo DB\nclient = MongoClient('localhost:27017') \ndb = client[db_name] \nc = db.AmsMAP\nc.insert(data)\npprint.pprint(c)\n\n\n\n\n\nprint('size of data',db.AmsMAP.count()) # of data\nprint ('number of ways',db.AmsMAP.find({'type':'way'}).count()) # of way \nprint ('number of nodes',db.AmsMAP.find({'type':'node'}).count()) # of nodes\nprint ('number of bicycle parkings',db.AmsMAP.find({'amenity':'bicycle_parking'}).count()) # how many bicycle parkings in Amsterdam old center\nprint ('number of tourism attractions',db.AmsMAP.find({'tourism':'attraction'}).count())# how 
many tourism attractions in Amsterdam old center \n\n\n#Looking into the top 10 cusines in the old cental\ncuisine = db.AmsMAP.aggregate([\n {\"$match\" : {\"cuisine\" : {\"$exists\" : 1}}},\n {\"$group\" : {\"_id\" : \"$cuisine\",\n \"count\" : {\"$sum\" : 1}}},\n {\"$sort\" : {\"count\" : -1}},\n {\"$limit\" : 5}\n ])\nprint ('The top 10 cuisine:') \npprint.pprint([doc for doc in cuisine])\n\n\n#most common amenity top 5\n\namenity = db.AmsMAP.aggregate([ \n { \"$group\" : { \"_id\" : \"$amenity\",\"count\": {\"$sum\": 1 }}},\n { \"$sort\" : { \"count\" : -1 }},\n { \"$skip\" : 1 },\n { \"$limit\" : 5 }\n ])\nprint ('top 5 common amrity:') \npprint.pprint([doc for doc in amenity])\n\n\n\n#\nshops = db.AmsMAP.aggregate([\n {\"$match\" : {\"shop\" : {\"$exists\" : 1}}},\n {\"$group\" : {\"_id\" : \"$shop\",\n \"count\" : {\"$sum\" : 1}}},\n {\"$sort\" : {\"count\" : -1}},\n {\"$limit\" : 15}\n ])\nprint ('The top 15 common shops:') \npprint.pprint([doc for doc in shops])\n\n#University\nUni = db.AmsMAP.aggregate([{\"$match\":{\"amenity\":{\"$exists\":1}, \"amenity\": \"university\", \"name\":{\"$exists\":1}}},\n {\"$group\":{\"_id\":\"$name\", \"count\":{\"$sum\":1}}},\n {\"$sort\":{\"count\":-1}}])\nprint ('The Universities in Amsterdam:') \npprint.pprint([doc for doc in Uni])\n\n\n\n\n\n\n","sub_path":"Project3_DATA_WRANGLING/insert_osm_data.py","file_name":"insert_osm_data.py","file_ext":"py","file_size_in_byte":4855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"455368399","text":"import numpy as np\n\ndef calculate_discounted_returns(rewards):\n \"\"\"\n Calculate discounted reward and then normalize it\n See Sutton book for definition\n\n Params:\n rewards: list of rewards for every episode\n \"\"\"\n returns = np.zeros(len(rewards))\n\n next_return = 0 # 0 because we start at the last timestep\n for t in reversed(range(0, len(rewards))):\n next_return = rewards[t] + args.gamma * next_return\n returns[t] = next_return\n # normalize for better statistical properties\n returns = (returns - returns.mean()) / (returns.std() + np.finfo(np.float32).eps)\n return returns\n\n","sub_path":"numpy/utils/rl_common.py","file_name":"rl_common.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"376804557","text":"import vapoursynth as vs\nimport re\nimport functools\n\ndef parse_preset(preset_string):\n \"\"\"\n Uses regex to parse parameters from preset string.\n\n :param preset_string: the string to be parsed\n :return: tuple in width, height, fpsnum, fpsden, format. 
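calculate_discounted_returns in rl_common.py above reads args.gamma, but that module never defines args, so calling the function as written raises NameError. A self-contained variant that takes the discount factor as an explicit parameter:

    import numpy as np

    def discounted_returns(rewards, gamma=0.99):
        # same backward recursion as above, gamma passed explicitly
        returns = np.zeros(len(rewards))
        next_return = 0.0
        for t in reversed(range(len(rewards))):
            next_return = rewards[t] + gamma * next_return
            returns[t] = next_return
        # normalize for better statistical properties
        return (returns - returns.mean()) / (returns.std() + np.finfo(np.float32).eps)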
None if illegal string\n \"\"\"\n funcName = 'parse_preset'\n valid_resolutions = {'QVGA': (320, 240),\n 'VGA': (640, 480),\n 'qHD': (640, 360),\n 'XGA': (1024, 768),\n 'HD': (1280, 720),\n 'WXGA': (1280, 768),\n 'FHD': (1920, 1080),\n 'QHD': (2560, 1440)}\n\n valid_format = ['RGB24', 'RGB48',\n 'YUV420P8', 'YUV420P10', 'YUV420P16',\n 'YUV422P8', 'YUV422P10', 'YUV422P16',\n 'YUV444P8', 'YUV444P10', 'YUV444P16',\n 'GRAY8', 'GRAY16']\n\n valid_res_string = '|'.join(valid_resolutions.keys())\n valid_format_string = '|'.join(valid_format)\n\n expression = re.compile(r'({res})_\\d+_\\d+_({form})'.format(res=valid_res_string,\n form=valid_format_string))\n\n result = expression.match(preset_string)\n\n if not result:\n raise ValueError(funcName + ': Invalid preset string')\n else:\n r, fn, fd, fo = result.group().split('_')\n\n return valid_resolutions[r][0], valid_resolutions[r][1],\\\n int(fn), int(fd), getattr(vs, fo)\n\n\ndef parse_color(color):\n \"\"\"\n Helper function that parses a string or int to 8-bit standard RGB values.\n\n :param color: the color string/integer to be parsed\n :return: tuple of int: (R, G, B)\n \"\"\"\n funcName = 'parse_color'\n if isinstance(color, int):\n if color > 0xFFFFFF or color < 0:\n raise ValueError(funcName + \": Invalid color value\")\n r = color >> 16\n g = (color >> 8) - (r << 8)\n b = color - (r << 16) - (g << 8)\n elif isinstance(color, str):\n if color.startswith('#'):\n color = color[1:]\n color = color.capitalize()\n expression = re.compile(r'(\\d|A|B|C|D|E|F){8}')\n result = expression.match(color)\n\n if not result:\n raise ValueError(funcName + ': Invalid color string')\n\n ret = result.group()\n r, g, b = map(functools.partial(int, base=16), (ret[:2], ret[2:4], ret[4:]))\n else:\n raise ValueError(funcName + ': Uninterpretable color')\n\n return r, g, b\n\n\ndef new_clip(preset=\"FHD_24000_1001_RGB24\", width=None, height=None,\n fpsnum=None, fpsden=None, format=None, duration=None, color=0x000000):\n \"\"\"\n Get a blank clip with given background color.\n\n :param preset: string in {Resolution}_{FPS dividend}_{DPS denominator}_{VS Format}\n :param width: video width, integer, overrides preset\n :param height: video height, integer, overrides preset\n :param fpsnum: FPS dividend, integer, overrides preset\n :param fpsden: FPS denominator, integer, overrides preset\n :param format: video format, must be VS.format, overrides preset\n :param duration: the duration of the new clip, integer, in seconds. Default to be 30\n :param color: the color of the new blank clip, in standard 8-bit RGB.\n Can be string or HEX. 
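Two likely bugs in parse_color above: str.capitalize() uppercases only the first character (so 'ff8000' becomes 'Ff8000' and fails the uppercase-only pattern), and the {8} quantifier expects eight hex digits even though the docstring, the integer branch, and the ret[:2]/ret[2:4]/ret[4:] slicing all assume six (RRGGBB). A hedged rewrite of the string branch:

    import re

    def parse_rgb_string(color: str):
        s = color.lstrip('#').upper()            # upper(), not capitalize()
        if not re.fullmatch(r'[0-9A-F]{6}', s):  # six hex digits, RRGGBB
            raise ValueError('Invalid color string')
        return tuple(int(s[i:i + 2], 16) for i in (0, 2, 4))

    assert parse_rgb_string('#ff8000') == (255, 128, 0)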
Defaults to be black.\n :return: the corresponding blank clip.\n \"\"\"\n\n funcName = \"new_clip\"\n format = format if isinstance(format, vs.Format) else \\\n getattr(vs, format) if hasattr(vs, str(format)) else None\n w, h, fn, fd, fo, du = None, None, None, None, None, None\n\n try:\n w, h, fn, fd, fo = parse_preset(preset)\n r, g, b = parse_color(color)\n except ValueError as e:\n raise ValueError(funcName + ': ' + str(e))\n\n w = width if width else w\n h = height if height else h\n fn = fpsnum if fpsnum else fn\n fd = fpsden if fpsden else fd\n fo = format if format else fo\n du = duration if duration and isinstance(duration, int) else 30\n\n\n if not (w and h and fn and fd and fo):\n raise ValueError(funcName + \": Invalid parameters.\")\n\n core = vs.get_core()\n\n rgb8_to_yuv8 = lambda r, g, b: tuple(map(int, (0.299 * r + 0.587 * g + 0.114 * b,\n -0.169 * r - 0.331 * g + 0.5 * b + 128,\n 0.5 * r - 0.419 * g - 0.081 * b + 128)))\n\n rgb16_to_yuv16 = lambda r, g, b: tuple(map(int, (0.299 * r + 0.587 * g + 0.114 * b,\n -0.169 * r - 0.331 * g + 0.5 * b + 32768,\n 0.5 * r - 0.419 * g - 0.081 * b + 32768)))\n\n depth8_to_16 = lambda val: (val << 8) + (1 << 7)\n\n if fo.value > 1000000 and fo.value < 2000000: # vs.GRAY\n if fo == vs.GRAY8:\n co = (r+g+b)/3\n elif fo == vs.GRAY16:\n co = depth8_to_16((r+g+b)/3)\n else:\n raise ValueError(funcName + ': Unsupported format')\n\n elif fo.value > 2000000 and fo.value < 3000000: # vs.RGB\n if fo == vs.RGB24:\n co = (r, g, b)\n elif fo == vs.RGB48:\n co = tuple(map(depth8_to_16, (r, g, b)))\n else:\n raise ValueError(funcName + ': Unsupported format')\n\n elif fo.value > 3000000 and fo.value < 4000000: # vs.YUV\n if fo.name.endswith('P8'):\n co = rgb8_to_yuv8(r, g, b)\n elif fo.name.endswith('P16'):\n co = rgb16_to_yuv16(*tuple(map(depth8_to_16, (r, g, b))))\n else:\n raise ValueError(funcName + ': Unsupported format')\n else:\n raise ValueError(funcName + ': Unsupported format')\n\n return core.std.BlankClip(width=w, height=h, format=fo,\n fpsnum=fn, fpsden=fd, length=du, color=co)\n\n","sub_path":"vsnle/clip_utils.py","file_name":"clip_utils.py","file_ext":"py","file_size_in_byte":5748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"616642199","text":"from user.models import UserData\nfrom django.db import serializers\n\n\nclass ProductCreateSerializer(serializers.ModelSerializer):\n class Meta:\n model = UserData\n fields = [\n \"userId\",\n \"id\",\n \"title\",\n \"body\",\n ]\n def create(self,validated_data):\n return UserData.objects.create(**validated_data)","sub_path":"user/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"26186305","text":"# Stefan Yuzhao Heng CS3\n\nfrom tkinter import*\nfrom tkinter import messagebox\nfrom tkinter import font\nfrom random import*\n\ndef Initialize():\n global InfoShown\n global x\n global Disabled\n global labelInfoBG\n global PlayerNumber\n labelInfoBG = Label(root)\n Disabled = False\n x=2\n InfoShown =False\n global A\n global T\n global J\n global Q\n global K\n A = 'A'\n T = 'T'\n J = 'J'\n Q = 'Q'\n K = 'K'\n global Deck\n Deck = [0,A,2,3,4,5,6,7,8,9,T,J,Q,K,A,2,3,4,5,6,7,8,9,T,J,Q,K,A,2,3,4,5,6,7,8,9,T,J,Q,K,A,2,3,4,5,6,7,8,9,T,J,Q,K]\n global blank\n blank = \" \"\n global Card\n Card = [[\"Not Used\"],[],[],[],[],[],[]]\n global Busted\n global BlackJack\n global OK\n OK = \"OK\"\n Busted = 
\"Busted\"\n BlackJack = \"BlackJack\"\n global StatusCount\n StatusCount = [\"Not used\",OK,OK,OK,OK,OK,OK]\n global Winner\n Winner = [\"Not used\",blank,blank,blank,blank,blank,blank]\n global BankerShown\n BankerShown = False\n global Y\n Y = \"Y\"\n global StandPressed\n StandPressed = [\"Not used\",blank,blank,blank,blank,blank,blank]\n\ndef LoadInterface():\n global root\n root=Tk()\n root.title(\"Black Jack\")\n cv=Canvas(root,width=500,height=720)\n cv.pack()\n LoadBackGround()\n\ndef LoadBackGround():\n labelBackGround=Label(root,bg=\"#bbbbbb\")\n labelBackGround.place(height=4000,width=4000,x=0,y=0)\n \ndef SetPlayers():\n global root\n global labelInputText\n global button2\n global button3\n global button4\n global button5\n global button6\n Number = [\"Not used\",1,2,3,4,5,6]\n labelInputText=Label(root,activebackground=\"#bbbbbb\",text=\"Input the number of players:\",anchor=\"w\",bg=\"#bbbbbb\",font=(\"Marker Felt\",23,\"bold\"))\n labelInputText.place(height=100,width=400,x=50,y=120)\n XCoor = 60\n button2=Button(root,activebackground=\"#bbbbbb\",text=Number[2],font=(\"造字工房悦黑(非商用)纤细体\",30,\"bold\"))\n button2.place(height=50,width=50,x=XCoor,y=300)\n button2.bind(\"\",NumberInput)\n XCoor = XCoor + 75\n button3=Button(root,activebackground=\"#bbbbbb\",text=Number[3],font=(\"造字工房悦黑(非商用)纤细体\",30,\"bold\"))\n button3.place(height=50,width=50,x=XCoor,y=300)\n button3.bind(\"\",NumberInput)\n XCoor = XCoor + 75\n button4=Button(root,activebackground=\"#bbbbbb\",text=Number[4],font=(\"造字工房悦黑(非商用)纤细体\",30,\"bold\"))\n button4.place(height=50,width=50,x=XCoor,y=300)\n button4.bind(\"\",NumberInput)\n XCoor = XCoor + 75\n button5=Button(root,activebackground=\"#bbbbbb\",text=Number[5],font=(\"造字工房悦黑(非商用)纤细体\",30,\"bold\"))\n button5.place(height=50,width=50,x=XCoor,y=300)\n button5.bind(\"\",NumberInput)\n XCoor = XCoor + 75\n button6=Button(root,activebackground=\"#bbbbbb\",text=Number[6],font=(\"造字工房悦黑(非商用)纤细体\",30,\"bold\"))\n button6.place(height=50,width=50,x=XCoor,y=300)\n button6.bind(\"\",NumberInput)\n \ndef NumberInput(event):\n global PlayerNumber \n button=event.widget\n PlayerNumber = int(button[\"text\"])\n button2.place_forget()\n button3.place_forget()\n button4.place_forget()\n button5.place_forget()\n button6.place_forget()\n labelInputText.place_forget()\n SubMain()\n \ndef DisplayPlayers():\n global PlayerDisplay\n global labelPlayerName\n Number = [\"Not used\",1,2,3,4,5,6]\n PlayerDisplay = [\"Not used\",\"Banker\",\"Player 2\",\"Player 3\",\"Player 4\",\"Player 5\",\"Player 6\"]\n YCoor = 10\n for i in range (1,PlayerNumber+1):\n labelPlayerName=Label(root,text=PlayerDisplay[i]+\":\",anchor=\"w\",bg=\"#bbbbbb\",font=(\"造字工房悦黑(非商用)纤细体\",15,\"bold\"))\n labelPlayerName.place(height=60,width=80,x=15,y=YCoor)\n YCoor = YCoor + 70\n\ndef PlayersDraw():\n DrawAll()\n DrawAll()\n CountPoints()\n\ndef DrawAll():\n global PlayerNumber\n for i in range(1,PlayerNumber + 1):\n Draw(i)\n\ndef Draw(i):\n temp = randint(1,52)\n while Deck[temp] == blank:\n temp = randint(1,52)\n Card[i].append(Deck[temp])\n Deck[temp] = blank\n\ndef CountPoints():\n global PointCount\n ACount = 0\n TypeCount = [[\"Not used\"],[],[],[],[],[],[]]\n PointCount = [\"Not used\",0,0,0,0,0,0]\n for i in range(1, PlayerNumber + 1):\n for e in range(0, len(Card[i])):\n if A == Card[i][e]:\n ACount = ACount + 1\n elif T == Card[i][e] or J == Card[i][e] or Q == Card[i][e] or K == Card[i][e]:\n PointCount[i] = PointCount[i] + 10\n else:\n PointCount[i] = PointCount[i] + Card[i][e]\n 
TypeCount[i].append(ACount)\n if ACount >= 1:\n PointCount[i] = PointCount[i]+10+ACount\n if PointCount[i] > 21:\n PointCount[i] = PointCount[i] - 10\n ACount = 0\n for i in range(1,PlayerNumber + 1):\n if PointCount[i] > 21:\n StatusCount[i] = Busted\n elif PointCount[i] == 21:\n StatusCount[i] = BlackJack\n\ndef ShowCardsHidden():\n global BankerShown\n CheckBankerShow()\n for e in range(2,PlayerNumber+1):\n Show2Cards(e)\n PlayerHidden(e)\n\ndef ShowCardsShown():\n global BankerShown\n CheckBankerShow()\n for e in range(2,PlayerNumber+1):\n Show2Cards(e)\n PlayerShown(e)\n \ndef CheckBankerShow():\n XCoor = 125\n YCoor = 10\n labelThisCard=Label(root,text=Card[1][0],bg=\"#808080\",font=(\"造字工房悦黑(非商用)纤细体\",30,\"bold\"))\n labelThisCard.place(height=60,width=50,x=XCoor,y=YCoor)\n XCoor = XCoor + 70\n labelThisCard=Label(root,text=blank,bg=\"#909090\",font=(\"造字工房悦黑(非商用)纤细体\",30,\"bold\"))\n labelThisCard.place(height=60,width=50,x=XCoor,y=YCoor)\n if BankerShown == True:\n Show2Cards(1)\n PlayerShown(1)\n\ndef Show2Cards(PlayerIndex):\n XCoor = 125\n YCoor = 10+70*(PlayerIndex-1)\n for i in range(1,3):\n labelThisCard=Label(root,text=Card[PlayerIndex][i-1],bg=\"#b0b0b0\",font=(\"造字工房悦黑(非商用)纤细体\",30,\"bold\"))\n labelThisCard.place(height=60,width=50,x=XCoor,y=YCoor)\n XCoor = XCoor + 70\n\ndef PlayerHidden(PlayerIndex):\n XCoor = 125+70*2\n YCoor = 10+70*(PlayerIndex-1)\n for i in range(3,len(Card[PlayerIndex])+1):\n labelThisCard=Label(root,text=blank,bg=\"#909090\",font=(\"造字工房悦黑(非商用)纤细体\",30,\"bold\"))\n labelThisCard.place(height=60,width=50,x=XCoor,y=YCoor)\n XCoor = XCoor + 70\n\ndef PlayerShown(PlayerIndex):\n XCoor = 125+70*2\n YCoor = 10+70*(PlayerIndex-1)\n for i in range(3,len(Card[PlayerIndex])+1):\n labelThisCard=Label(root,text=Card[PlayerIndex][i-1],bg=\"#909090\",font=(\"造字工房悦黑(非商用)纤细体\",30,\"bold\"))\n labelThisCard.place(height=60,width=50,x=XCoor,y=YCoor)\n XCoor = XCoor + 70\n\ndef PlayerTurn(CurrentPlayer):\n global labelSubBG\n labelSubBG=Label(root,bg=\"#b1b1b1\")\n labelSubBG.place(height=200,width=270,x=15,y=10+70*PlayerNumber+5)\n labelPlayerName=Label(root,text=PlayerDisplay[CurrentPlayer]+\"'s turn:\",bg=\"#b1b1b1\",anchor=\"w\",font=(\"造字工房悦黑(非商用)纤细体\",18,\"bold\"))\n labelPlayerName.place(height=50,width=200,x=15+15,y=10+70*PlayerNumber+5)\n\ndef HitButton(CurrentPlayer):\n global ButtonHit\n ButtonHit=Button(root,text=\"HIT\",activebackground=\"#bbbbbb\",font=(\"造字工房悦黑(非商用)纤细体\",15,\"bold\"))\n ButtonHit.place(height=50,width=80,x=80,y=10+70*PlayerNumber+70)\n ButtonHit.bind(\"\",HitDone)\n x = CurrentPlayer\n root.bind(\"\",KeyboardCheck)\n\ndef HitDone(event):\n global labelShowInfo\n Disabled = False\n if ButtonHit['state'] != 'disabled':\n Draw(x)\n CountPoints()\n if BankerShown != True:\n ShowCardsHidden()\n elif BankerShown == True:\n ShowCardsShown()\n Info = \"Your cards are: \"+str(Card[x])+\".\\nYour score is \"+str(PointCount[x])+\".\"\n labelShowInfo['text']=Info\n if StandPressed[x] == Y or StatusCount[x] == Busted:\n DisableHit(ButtonHit)\n Disabled = True\n\ndef KeyboardCheck(event):\n if event.char =='h':\n Disabled = False\n global x\n if ButtonHit['state'] != 'disabled':\n Draw(x)\n CountPoints()\n ShowCards()\n Info = \"Your cards are: \"+str(Card[x])+\".\\nYour score is \"+str(PointCount[x])+\".\"\n labelShowInfo['text']=Info\n if StandPressed[x] == Y or StatusCount[x] == Busted:\n DisableHit(ButtonHit)\n Disabled = True\n elif event.char == 's':\n StandPressed[x] = Y\n if StandPressed[x] == Y or StatusCount[x] == Busted:\n 
DisableHit(ButtonHit)\n Disabled = True\n elif event.char == 'i':\n global InfoShown\n global labelInfoBG\n if InfoShown == False:\n labelInfoBG=Label(root,bg=\"#b1b1b1\")\n labelInfoBG.place(height=70,width=270,x=15,y=10+70*PlayerNumber+5+70+70+60)\n labelShowInfo.place_forget()\n GetInfo()\n InfoShown = True\n elif InfoShown == True:\n labelShowInfo.place_forget()\n labelInfoBG.place_forget()\n InfoShown = False\n elif event.char == 'p' or event.char =='e':\n if x != 1:\n DisplayPlayers()\n InfoShown = False\n labelShowInfo.place_forget()\n labelInfoBG.place_forget()\n if x >= 2 and x < PlayerNumber:\n x = x + 1\n elif x == PlayerNumber:\n x = 1\n BankerShown = True\n ShowCard(1)\n for i in range(2,PlayerNumber+1):\n PlayerShown(i)\n labelPlayerName=Label(root,text=PlayerDisplay[x]+\"'s turn:\",bg=\"#b1b1b1\",anchor=\"w\",font=(\"造字工房悦黑(非商用)纤细体\",18,\"bold\"))\n labelPlayerName.place(height=50,width=200,x=15+15,y=10+70*PlayerNumber+5)\n ButtonHit['state']=\"normal\"\n if x == 1:\n ButtonNext['text'] = \"END\"\n ButtonNext.bind(\"\",ShowResult)\n root.bind(\"\",KeyboardCheck)\n root.bind(\"\",KeyboardCheck)\n elif x == 1:\n temp = \"\"\n for i in range (2,PlayerNumber+1):\n temp = temp+\"Banker VS \"+PlayerDisplay[i]+\"\\n\"\n if PointCount[1] > PointCount[i]:\n temp = temp+\" Banker wins\"\n elif PointCount[1] < PointCount[i]:\n temp = temp+\" \"+PlayerDisplay[i]+\" wins\"\n elif PointCount[1] == PointCount[i]:\n temp = temp+\" Draw\"\n temp = temp+\"\\n\"\n messagebox.showinfo(\"Game finished\",temp)\n elif event.char == 'r':\n LoadBackGround()\n Initialize()\n SetPlayers()\n \ndef StandButton(x):\n ButtonStand=Button(root,text=\"STAND\",activebackground=\"#bbbbbb\",font=(\"造字工房悦黑(非商用)纤细体\",15,\"bold\"))\n ButtonStand.place(height=50,width=80,x=180,y=10+70*PlayerNumber+70)\n ButtonStand.bind(\"\",StandCheck)\n root.bind(\"\",KeyboardCheck)\n\ndef StandCheck(event):\n StandPressed[x] = Y\n if StandPressed[x] == Y or StatusCount[x] == Busted:\n DisableHit(ButtonHit)\n Disabled = True\n\ndef ShowInfoButton():\n global labelShowInfo\n ButtonShowInfo=Button(root,text=\"SHOW INFO\",activebackground=\"#bbbbbb\",font=(\"造字工房悦黑(非商用)纤细体\",15,\"bold\"))\n ButtonShowInfo.place(height=50,width=130,x=30,y=10+70*PlayerNumber+140)\n ButtonShowInfo.bind(\"\",ShowInfo)\n root.bind(\"\",KeyboardCheck)\n\ndef GetInfo():\n global labelShowInfo\n Info = \"Your cards are: \"+str(Card[x])+\".\\nYour score is \"+str(PointCount[x])+\".\"\n labelShowInfo=Label(root,text=Info,anchor=\"w\",bg=\"#808080\")\n labelShowInfo.place(height=50,width=240,x=30,y=10+70*PlayerNumber+210)\n\ndef ShowInfo(event):\n global InfoShown\n global labelShowInfo\n global labelInfoBG\n if InfoShown == False:\n labelInfoBG=Label(root,bg=\"#b1b1b1\")\n labelInfoBG.place(height=70,width=270,x=15,y=10+70*PlayerNumber+5+70+70+60)\n labelShowInfo.place_forget()\n GetInfo()\n InfoShown = True\n elif InfoShown == True:\n labelShowInfo.place_forget()\n labelInfoBG.place_forget()\n InfoShown = False\n\ndef NextButton():\n global ButtonNext\n ButtonNext=Button(root,text=\"PASS\",activebackground=\"#bbbbbb\",font=(\"造字工房悦黑(非商用)纤细体\",15,\"bold\"))\n ButtonNext.place(height=50,width=80,x=180,y=10+70*PlayerNumber+140)\n ButtonNext.bind(\"\",Next)\n\ndef Next(event):\n global labelShowInfo\n global ButtonNext\n global BankerShown\n global labelInfoBG\n global InfoShown\n DisplayPlayers()\n InfoShown = False\n labelShowInfo.place_forget()\n labelInfoBG.place_forget()\n global x\n if x >= 2 and x < PlayerNumber:\n x = x + 1\n elif x == PlayerNumber:\n x 
= 1\n BankerShown = True\n ShowCardsShown()\n for i in range(2,PlayerNumber+1):\n PlayerShown(i)\n labelPlayerName=Label(root,text=PlayerDisplay[x]+\"'s turn:\",bg=\"#b1b1b1\",anchor=\"w\",font=(\"造字工房悦黑(非商用)纤细体\",18,\"bold\"))\n labelPlayerName.place(height=50,width=200,x=15+15,y=10+70*PlayerNumber+5)\n ButtonHit['state']=\"normal\"\n if x == 1:\n ButtonNext['text'] = \"END\"\n ButtonNext.bind(\"\",ShowResult)\n root.bind(\"\",KeyboardCheck)\n\ndef ShowResult(event):\n temp = \"\"\n for i in range (2,PlayerNumber+1):\n temp = temp+\"Banker VS \"+PlayerDisplay[i]+\"\\n\"\n if StatusCount[1] != Busted and StatusCount[i] != Busted:\n if PointCount[1] > PointCount[i]:\n temp = temp+\" Banker wins\"\n elif PointCount[1] < PointCount[i]:\n temp = temp+\" \"+PlayerDisplay[i]+\" wins\"\n elif PointCount[1] == PointCount[i]:\n temp = temp+\" Draw\"\n elif StatusCount[1] != Busted and StatusCount[i] == Busted:\n temp = temp+\" Banker wins\"\n elif StatusCount[1] == Busted and StatusCount[i] != Busted:\n temp = temp+\" \"+PlayerDisplay[i]+\" wins\"\n elif StatusCount[1] == Busted and StatusCount[i] == Busted:\n temp = temp+\" Banker wins\"\n temp = temp+\"\\n\"\n messagebox.showinfo(\"Game finished\",temp)\n\ndef DisableHit(Button):\n Button['state']='disabled'\n\ndef RestartButton():\n ButtonRestart=Button(root,activebackground=\"#bbbbbb\",text=\"RESTART\",font=(\"造字工房悦黑(非商用)纤细体\",18,\"bold\"))\n ButtonRestart.place(height=60,width=120,x=335,y=10+70*PlayerNumber+10)\n ButtonRestart.bind(\"\",Restart)\n root.bind(\"\",KeyboardCheck)\n\ndef Restart(event):\n LoadBackGround()\n Initialize()\n SetPlayers()\n\ndef Main():\n LoadInterface()\n Initialize()\n SetPlayers()\n\ndef SubMain():\n global labelShowInfo\n labelShowInfo = Label(root)\n global BankerShown\n BankerShown = False\n DisplayPlayers()\n PlayersDraw()\n ShowCardsHidden()\n PlayerTurn(x)\n HitButton(x)\n StandButton(x)\n ShowInfoButton()\n NextButton()\n RestartButton()\n\nMain()\nroot.mainloop()\n","sub_path":"BlackJack/Records/New Black Jack/New Black Jack_6.0.pyw","file_name":"New Black Jack_6.0.pyw","file_ext":"pyw","file_size_in_byte":15489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"97461071","text":"def solution(N, stages):\n answer = []\n stages.sort()\n denominator = 0\n arr = [0] * N\n pctg = [0] * N\n for i in reversed(stages):\n denominator += 1\n if i <= N:\n arr[i - 1] += 1\n pctg[i - 1] = arr[i - 1] / denominator\n\n for i in range(N):\n pctg[i] = [pctg[i], N-i]\n pctg.sort()\n answer = [N-i[1]+1 for i in pctg]\n answer.reverse()\n return answer\n# 34215\nprint(solution(\t5, [2, 1, 2, 6, 2, 4, 3, 3]))\n","sub_path":"실패율.py","file_name":"실패율.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"133544348","text":"from os.path import exists, expanduser\nfrom typing import List\nfrom pprint import pformat\n\nfrom pytezos.operation.result import OperationResult\nfrom pytezos.michelson.contract import Contract\nfrom pytezos.michelson.converter import convert\nfrom pytezos.michelson.micheline import skip_nones\nfrom pytezos.michelson.formatter import micheline_to_michelson\nfrom pytezos.operation.group import OperationGroup\nfrom pytezos.operation.content import format_mutez, format_tez\nfrom pytezos.interop import Interop\nfrom pytezos.tools.docstring import get_class_docstring\nfrom pytezos.repl.interpreter import Interpreter\n\n\nclass ContractCallResult(OperationResult):\n\n 
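The Black Jack program above appears extraction-damaged in one systematic way: every Tk event binding reads bind("", handler) with an empty event string, almost certainly because angle-bracketed event names such as <Button-1> and <Key> were stripped as if they were HTML tags. KeyboardCheck, which branches on event.char, supports that reading. A plausible reconstruction of how those lines likely originally read (the exact event names are a guess):

    # button clicks
    button2.bind('<Button-1>', NumberInput)
    ButtonHit.bind('<Button-1>', HitDone)
    # keyboard shortcuts handled by KeyboardCheck's event.char branches
    root.bind('<Key>', KeyboardCheck)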
@classmethod\n def from_contract_call(cls, operation_group: dict, address, contract: Contract) -> list:\n results = list()\n for content in OperationResult.iter_contents(operation_group):\n if content['kind'] == 'transaction':\n if content['destination'] == address:\n results.append(cls.from_transaction(content))\n elif content['kind'] == 'origination':\n result = cls.get_result(content)\n if address in result.get('originated_contracts', []):\n results.append(cls.from_origination(content))\n\n def decode_result(res):\n kwargs = dict(storage=contract.storage.decode(res.storage))\n if hasattr(res, 'big_map_diff'):\n contract.storage.big_map_init(res.storage)\n kwargs.update(big_map_diff=contract.storage.big_map_diff_decode(res.big_map_diff))\n if hasattr(res, 'parameters'):\n kwargs.update(parameters=contract.parameter.decode(data=res.parameters))\n if hasattr(res, 'operations'):\n kwargs.update(operations=res.operations)\n return cls(**kwargs)\n\n return list(map(decode_result, results))\n\n @classmethod\n def from_code_run(cls, code_run: dict, parameters, contract: Contract):\n contract.storage.big_map_init(code_run['storage'])\n return cls(\n parameters=contract.parameter.decode(parameters),\n storage=contract.storage.decode(code_run['storage']),\n big_map_diff=contract.storage.big_map_diff_decode(code_run.get('big_map_diff', [])),\n operations=code_run.get('operations', [])\n )\n\n @classmethod\n def from_repl_result(cls, res: dict, parameters, contract: Contract):\n return cls(\n parameters=contract.parameter.decode(parameters),\n storage=contract.storage.decode(res['result']['storage'].val_expr),\n big_map_diff=contract.storage.big_map_diff_decode(res['result']['big_map_diff']),\n operations=[x.content for x in res['result']['operations']]\n )\n\n\nclass ContractCall(Interop):\n\n def __init__(self, parameters,\n address=None, contract: Contract = None, factory=Contract, amount=0, shell=None, key=None):\n super(ContractCall, self).__init__(shell=shell, key=key)\n self.parameters = parameters\n self.address = address\n self.amount = amount\n\n if contract is None:\n assert address is not None\n contract = factory.from_micheline(self.shell.contracts[address].code())\n\n self.contract = contract\n\n def _spawn(self, **kwargs):\n return ContractCall(\n parameters=self.parameters,\n address=self.address,\n contract=self.contract,\n amount=kwargs.get('amount', self.amount),\n shell=kwargs.get('shell', self.shell),\n key=kwargs.get('key', self.key)\n )\n\n def __repr__(self):\n res = [\n super(ContractCall, self).__repr__(),\n f'.address # {self.address}',\n f'.amount # {self.amount}',\n '\\nParameters',\n pformat(self.parameters),\n '\\nHelpers',\n get_class_docstring(self.__class__)\n ]\n return '\\n'.join(res)\n\n def with_amount(self, amount):\n \"\"\"\n Send funds to the contract too.\n :param amount: amount in microtez (int) or tez (Decimal)\n :return: ContractCall\n \"\"\"\n return self._spawn(amount=amount)\n\n @property\n def operation_group(self) -> OperationGroup:\n \"\"\"\n Show generated operation group.\n :return: OperationGroup\n \"\"\"\n return OperationGroup(shell=self.shell, key=self.key) \\\n .transaction(destination=self.address,\n amount=self.amount,\n parameters=self.parameters) \\\n .fill()\n\n def inject(self):\n \"\"\"\n Autofill, sign and inject resulting operation group.\n \"\"\"\n return self.operation_group.autofill().sign().inject()\n\n def cmdline(self):\n \"\"\"\n Generate command line for tezos client.\n :return: str\n \"\"\"\n arg = 
micheline_to_michelson(self.parameters['value'], inline=True)\n source = self.key.public_key_hash()\n amount = format_tez(self.amount)\n entrypoint = self.parameters['entrypoint']\n return f'transfer {amount} from {source} to {self.address} ' \\\n f'--entrypoint \\'{entrypoint}\\' --arg \\'{arg}\\''\n\n def interpret(self, storage, source=None, sender=None, amount=None, balance=None, chain_id=None, now=None):\n \"\"\"\n Run code in the builtin REPL (WARNING! Not recommended for critical tasks)\n :param storage: Python object\n :param source: patch SOURCE\n :param sender: patch SENDER\n :param amount: patch AMOUNT\n :param balance: patch BALANCE\n :param chain_id: patch CHAIN_ID\n :param now: patch NOW\n :return: ContractCallResult\n \"\"\"\n i = Interpreter()\n i.execute(self.contract.text)\n\n patch_map = {\n 'SOURCE': source,\n 'SENDER': sender,\n 'AMOUNT': amount,\n 'BALANCE': balance,\n 'CHAIN_ID': chain_id,\n 'NOW': now\n }\n for instr, value in patch_map.items():\n if value is not None:\n value = f'\"{value}\"' if isinstance(value, str) else value\n i.execute(f'PATCH {instr} {value}')\n\n s_expr = micheline_to_michelson(self.contract.storage.encode(storage), inline=True)\n p_expr = micheline_to_michelson(self.parameters['value'], inline=True)\n res = i.execute(f'RUN %{self.parameters[\"entrypoint\"]} ({p_expr}) ({s_expr})')\n\n return ContractCallResult.from_repl_result(\n res, parameters=self.parameters, contract=self.contract)\n\n def result(self, storage=None, source=None, sender=None, gas_limit=None):\n \"\"\"\n Simulate operation and parse the result.\n :param storage: Python object only. If storage is specified, `run_code` is called instead of `run_operation`.\n :param source: Can be specified for unit testing purposes\n :param sender: Can be specified for unit testing purposes,\n see https://tezos.gitlab.io/whitedoc/michelson.html#operations-on-contracts for the difference\n :param gas_limit: Specify gas limit (default is gas hard limit)\n :return: ContractCallResult\n \"\"\"\n chain_id = self.shell.chains.main.chain_id()\n if storage is not None:\n query = skip_nones(\n script=self.contract.code,\n storage=self.contract.storage.encode(storage),\n entrypoint=self.parameters['entrypoint'],\n input=self.parameters['value'],\n amount=format_mutez(self.amount),\n chain_id=chain_id,\n source=sender,\n payer=source,\n gas=gas_limit\n )\n code_run_res = self.shell.head.helpers.scripts.run_code.post(query)\n return ContractCallResult.from_code_run(\n code_run_res, parameters=self.parameters, contract=self.contract)\n else:\n opg_with_metadata = self.operation_group.fill().run()\n res = ContractCallResult.from_contract_call(\n opg_with_metadata, address=self.address, contract=self.contract)\n return res[0] if res else None\n\n def view(self):\n \"\"\"\n Get return value of a view method.\n :return: object\n \"\"\"\n opg_with_metadata = self.operation_group.fill().run()\n view_operation = OperationResult.get_contents(opg_with_metadata, source=self.address)[0]\n view_contract = Contract.from_micheline(self.shell.contracts[view_operation['destination']].code())\n return view_contract.parameter.decode(view_operation['parameters'])\n\n\nclass ContractEntrypoint(Interop):\n\n def __init__(self, name, address=None, contract: Contract = None, factory=Contract, shell=None, key=None):\n super(ContractEntrypoint, self).__init__(shell=shell, key=key)\n if contract is None:\n assert address is not None\n code = self.shell.contracts[address].code()\n contract = factory.from_micheline(code)\n\n 
self.contract = contract\n self.name = name\n self.address = address\n\n def _spawn(self, **kwargs):\n return ContractEntrypoint(\n name=self.name,\n contract=self.contract,\n address=self.address,\n shell=kwargs.get('shell', self.shell),\n key=kwargs.get('key', self.key),\n )\n\n def __repr__(self):\n res = [\n super(ContractEntrypoint, self).__repr__(),\n f'.address # {self.address}',\n f'\\n{self.__doc__}'\n ]\n return '\\n'.join(res)\n\n def __call__(self, *args, **kwargs):\n if args:\n if len(args) == 1:\n (data, is_single) = (args[0], True)\n else:\n (data, is_single) = (list(args), False)\n elif kwargs:\n (data, is_single) = (kwargs, False)\n else:\n (data, is_single) = ([], False)\n\n if self.name:\n data = {self.name: data} if data or is_single else self.name\n\n parameters = self.contract.parameter.encode(data)\n return ContractCall(\n parameters=parameters,\n address=self.address,\n contract=self.contract,\n shell=self.shell,\n key=self.key,\n )\n\n\nclass ContractInterface(Interop):\n __default_entry__ = 'call'\n\n def __init__(self, address=None, contract: Contract = None, factory=Contract, shell=None, key=None):\n super(ContractInterface, self).__init__(shell=shell, key=key)\n if contract is None:\n assert address is not None\n code = self.shell.contracts[address].code()\n contract = factory.from_micheline(code)\n\n self.contract = contract\n self.address = address\n\n for entry_name, docstring in contract.parameter.entries(default=self.__default_entry__):\n entry_point = ContractEntrypoint(\n name=entry_name if entry_name != self.__default_entry__ else None,\n address=self.address,\n contract=contract,\n shell=self.shell,\n key=self.key\n )\n entry_point.__doc__ = docstring\n setattr(self, entry_name, entry_point)\n\n def _spawn(self, **kwargs):\n return ContractInterface(\n address=self.address,\n contract=self.contract,\n shell=kwargs.get('shell', self.shell),\n key=kwargs.get('key', self.key)\n )\n\n def __repr__(self):\n entrypoints, _ = zip(*self.contract.parameter.entries(default=self.__default_entry__))\n res = [\n super(ContractInterface, self).__repr__(),\n f'.address # {self.address}',\n '\\nEntrypoints',\n *list(map(lambda x: f'.{x}()', entrypoints)),\n '\\nHelpers',\n get_class_docstring(self.__class__,\n attr_filter=lambda x: not x.startswith('_') and x not in entrypoints)\n ]\n return '\\n'.join(res)\n\n @classmethod\n def create_from(cls, source, shell=None, factory=Contract):\n if isinstance(source, str) and exists(expanduser(source)):\n contract = factory.from_file(source)\n else:\n contract = factory(convert(source, output='micheline'))\n\n return ContractInterface(contract=contract, shell=shell)\n\n def big_map_get(self, path, block_id='head'):\n \"\"\"\n Get BigMap entry as Python object by plain key and block height\n :param path: Json path to the key (or just key to access default BigMap location)\n :param block_id: Block height / hash / offset to use, default is `head`\n :return: object\n \"\"\"\n query = self.contract.storage.big_map_query(path)\n if query.get('big_map_id'):\n value = self.shell.blocks[block_id].context.big_maps[query['big_map_id']][query['script_expr']]()\n else:\n value = self.shell.blocks[block_id].context.contracts[self.address].big_map_get.post(query)\n return self.contract.storage.big_map_decode(value, query.get('big_map_id'))\n\n def storage(self, block_id='head'):\n \"\"\"\n Get storage as Pythons object at specified block height.\n :param block_id: Block height / hash / offset to use, default is `head`\n :return: object\n 
\"\"\"\n storage = self.shell.blocks[block_id].context.contracts[self.address].storage()\n return self.contract.storage.decode(storage)\n\n def operation_result(self, operation_group: dict) -> List[ContractCallResult]:\n \"\"\"\n Get operation parameters, storage and big_map_diff as Python objects.\n Can locate operation inside operation groups with multiple contents and/or internal operations.\n :param operation_group: {'branch', 'protocol', 'contents', 'signature'}\n :return: ContractCallResult\n \"\"\"\n return ContractCallResult.from_contract_call(\n operation_group, address=self.address, contract=self.contract)\n\n def manager(self):\n \"\"\"\n Get contract manager address (tz)\n :return: str\n \"\"\"\n return self.shell.block.context.contracts[self.address].manager()\n","sub_path":"pytezos/michelson/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":14286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"527921009","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Blockchain',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('timestamp', models.FloatField(null=True, blank=True)),\n ('usd', models.FloatField(null=True, blank=True)),\n ('volume_usd', models.FloatField(null=True, blank=True)),\n ('volume_btc', models.FloatField(null=True, blank=True)),\n ('transactions', models.IntegerField(null=True, blank=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Source',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(unique=True, max_length=128)),\n ('url', models.URLField()),\n ('image', models.CharField(default=b'none', max_length=128)),\n ],\n options={\n 'verbose_name_plural': 'sources',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='SpotPrice',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created', models.DateTimeField(editable=False)),\n ('price', models.FloatField(default=0)),\n ('source', models.ForeignKey(to='ecash.Source')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"ecash/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"622494419","text":"from model.group import Group\nfrom model.contact import Contact\nimport random\n\ndef test_delete_contact_from_group(app, orm):\n if len(orm.get_group_list()) == 0:\n app.group.create(Group(name=\"test\"))\n if len(orm.get_contact_list()) == 0:\n app.contact.create(Contact(firstname=\"New\",\n lastname=\"Contact\",\n nickname=\"test\",\n home_phone=\"79009009090\",\n mobile_phone=\"901111\",\n work_phone=\"80245\",\n secondary_phone='7(902)001',\n email1=\"mail1@mail.ru\", email2=\"mail2@mail.ru\", email3=\"mail3@mail.ru\",\n address=\"Address one\"))\n all_groups = orm.get_group_list()\n group = random.choice(all_groups)\n all_contacts = orm.get_contact_list()\n contact = random.choice(all_contacts)\n if contact not in (orm.get_contacts_in_group(group)):\n app.contact.add_contact_to_group(contact, group)\n 
app.contact.delete_contact_from_group(contact, group)\n assert contact not in orm.get_contacts_in_group(group)","sub_path":"test/test_delete_contact_from_group.py","file_name":"test_delete_contact_from_group.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"74081979","text":"import glob\nimport re\n\n\ndef removeComments(html):\n return re.sub(\"()\", \"\", html, flags=re.DOTALL)\n\n\ndef extractHeadAndBody(html):\n start = html.find(\"\")\n end = html.find(\"\") + len(\"\")\n return html[start:end]\n\n\ndef removeScripts(html):\n return re.sub(\"()\", \"\", html, flags=re.DOTALL)\n\n\ndef replaceLinks(html):\n html = re.sub(\n r'Index.html', r'https://idrottonline.se/HogdalensJK-Judo', html)\n html = re.sub(r'Information.html',\n r'https://idrottonline.se/HogdalensJK-Judo/Information', html)\n html = re.sub(r'Kalendarium.html',\n r'https://idrottonline.se/HogdalensJK-Judo/Kalendarium', html)\n html = re.sub(\n r'Arkiv.html', r'https://idrottonline.se/HogdalensJK-Judo/Arkiv', html)\n html = re.sub(r'Ledning.html',\n r'https://idrottonline.se/HogdalensJK-Judo/Ledning', html)\n return re.sub(r'./images/', r'https://idrottonline.se/HogdalensJK-Judo/globalassets/hogdalens-jk---judo/bilder/', html)\n\n\nif __name__ == \"__main__\":\n for inFile in glob.glob(\"*.html\"):\n with open(inFile, 'r', encoding='utf-8') as f:\n content = f.read()\n\n content = content.strip()\n content = removeComments(content)\n content = removeScripts(content)\n content = replaceLinks(content)\n content = extractHeadAndBody(content)\n content = re.sub(r\"\\s+\", \" \", content)\n content = re.sub(\"> <\", \"><\", content)\n content = re.sub(\"\\\"\", \"'\", content)\n\n outFile = open(\n \"minified/\" + inFile[:-5] + \".min.html\", \"w\", encoding='utf-8')\n outFile.write(content)\n outFile.close()\n","sub_path":"website/minifier.py","file_name":"minifier.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"117638772","text":"import numpy as np\nfrom helpers import equalize\nfrom slider import Frequency_Slider\nimport tkinter as tk\n\nclass Equalizer_Panel(tk.Frame):\n def __init__(self, master, viewer, Fs, bins=1):\n super().__init__(master,bd=1, relief=\"solid\")\n self.master = master\n self.bins = bins\n self.Fs = Fs\n self.viewer = viewer\n self.original_fourier = np.fft.fft(viewer.signal[\"samples\"])\n self.equalized_fourier = self.original_fourier\n # main frame\n self.rowconfigure(0, weight=1)\n self.columnconfigure(0, weight=1)\n self.frameScale = tk.Frame(self)\n self.intervalScale = tk.Frame(self)\n \n self.interval_value = tk.IntVar()\n self.interval_slider = tk.Scale(self.intervalScale, orient=\"horizontal\", label=\"Speed\", from_=1,\n to=1.9, resolution=0.1, variable=self.interval_value)\n self.interval_slider.grid(row=0, column=0, padx=5, pady=3)\n self.interval_value.set(1)\n self.interval_slider.bind(\"\",lambda e: self.increase_speed(float(self.interval_slider.get())))\n \n self.fmin_value = tk.IntVar()\n self.fmin_slider = tk.Scale(self.intervalScale, orient=\"horizontal\", label=\"Fmin (Hz)\", from_=1,\n to=self.Fs/2, resolution=1, variable=self.fmin_value)\n self.fmin_slider.grid(row=1, column=0, padx=5, pady=3)\n self.fmin_value.set(1)\n \n self.fmax_value = tk.IntVar()\n self.fmax_slider = tk.Scale(self.intervalScale, orient=\"horizontal\", label=\"Fmax (Hz)\", from_=1,\n to=self.Fs/2, resolution=1, 
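minifier.py above shows the same tag-stripping damage: patterns such as re.sub("()", ...) and lookups such as html.find("") have lost their HTML markup, leaving empty string literals (the equalizer code that follows has the same empty bind("") strings). A plausible reconstruction of the three damaged helpers, hedged as a guess at the original intent:

    import re

    def remove_comments(html):
        # non-greedy, so each comment is removed individually
        return re.sub(r'<!--.*?-->', '', html, flags=re.DOTALL)

    def remove_scripts(html):
        return re.sub(r'<script.*?</script>', '', html, flags=re.DOTALL)

    def extract_head_and_body(html):
        # guessing <head ... </body> from the function's name and slicing
        start = html.find('<head')
        end = html.find('</body>') + len('</body>')
        return html[start:end]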
variable=self.fmax_value)\n self.fmax_slider.grid(row=2, column=0, padx=5, pady=3)\n self.fmax_value.set(self.Fs/2)\n\n self.update_range_btn = tk.Button(self.intervalScale,command=lambda : self.update_range(),text=\"Update Range\")\n self.update_range_btn.grid(row=3,column=0, padx=5, pady=3)\n # creating the sliders loop\n for i in range(bins):\n slider = Frequency_Slider(self.frameScale, fmin=\n (i)*(Fs/2)/bins, fmax=(i+1)*(Fs/2)/bins)\n\n slider.grid(row=0, column=i+1)\n\n self.intervalScale.grid(column=0, row=0)\n self.frameScale.grid(column=1, row=0)\n\n def increase_speed(self, value):\n self.viewer.pause()\n self.viewer._figure.axes[0].clear()\n self.viewer._figure.axes[2].clear()\n self.viewer.plot(animated=True, interval=2-value)\n\n def update_spectrogram(self, fmin, fmax, inclusive_factor,exclusive_factor=1):\n \n self.equalized_fourier = equalize(\n self.original_fourier, self.equalized_fourier, self.viewer.signal[\"N\"],self.viewer.signal[\"Fs\"],fmin, fmax, inclusive_factor,exclusive_factor)\n \n self.viewer.update_equalized_samples(np.fft.ifft(self.equalized_fourier).real)\n \n self.viewer._figure.axes[1].set_ylim(self.fmin_value.get(),self.fmax_value.get())\n self.viewer._figure.axes[3].set_ylim(self.fmin_value.get(),self.fmax_value.get())\n self.viewer._figure.canvas.draw_idle()\n\n def update_range(self):\n fmin = self.fmin_value.get()\n fmax = self.fmax_value.get()\n diff = fmax - fmin\n\n for i,slider in enumerate(self.frameScale.children.values()):\n slider.set_range(fmin + (i)*diff/self.bins,fmin + (i+1)*diff/self.bins)\n \n self.update_spectrogram(fmin,fmax,1,0)","sub_path":"equalizer.py","file_name":"equalizer.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"299947912","text":"\nboard = [' ' for x in range(10)]\n\ndef paintBoard(board):\n print(' | |')\n print(' ' + board[1] + ' | ' + board[2] + ' | ' + board[3])\n print(' | |')\n print('-----------')\n print(' | |')\n print(' ' + board[4] + ' | ' + board[5] + ' | ' + board[6])\n print(' | |')\n print('-----------')\n print(' | |')\n print(' ' + board[7] + ' | ' + board[8] + ' | ' + board[9])\n print(' | |')\n print()\n\ndef insertLetter(letter, pos):\n board[pos] = letter\n\ndef isFieldFree(pos):\n return board[pos] == ' '\n\ndef isWinner(bo, le):\n return (bo[1]==le and bo[2]==le and bo[3]==le) or (bo[4]==le and bo[5]==le and bo[6]==le) or (bo[7]==le and bo[8]==le and bo[9]==le) or (bo[1]==le and bo[4]==le and bo[7]==le) or (bo[2]==le and bo[5]==le and bo[8]==le) or (bo[3]==le and bo[6]==le and bo[9]==le) or (bo[1]==le and bo[5]==le and bo[9]==le) or (bo[7]==le and bo[5]==le and bo[3]==le)\n\ndef playerMove(letter):\n run = True\n while run:\n move = input('Pleas select a position to place an \\'' + letter + '\\' (1-9):')\n try:\n move = int(move)\n if move > 0 and move < 10:\n if isFieldFree(move):\n run = False\n insertLetter(letter, move)\n else:\n print('Sorry this position is ocupied!')\n else:\n print('Pleas type the number within the range!')\n except:\n print('Pleas type a number!')\n\ndef isBoardFull():\n if board.count(' ') > 1:\n return False\n else:\n return True\n\ndef compMove():\n possibleMoves = [x for x, letter in enumerate(board) if letter == ' ' and x !=0]\n move = 0\n\n for let in ['X', 'O']:\n for i in possibleMoves:\n boardCopy = board[:]\n boardCopy[i] = let\n if isWinner(boardCopy, let):\n move = i\n return move\n\n cornersOpen = []\n for i in possibleMoves:\n if i in [1, 3, 7, 9]:\n 
cornersOpen.append(i)\n if len(cornersOpen) > 0:\n move = selectRandrom(cornersOpen)\n return move\n\n edgesOpen = []\n for i in possibleMoves:\n if i in [2, 4, 6, 8]:\n edgesOpen.append(i)\n if len(edgesOpen) > 0:\n move = selectRandrom(edgesOpen)\n\n return move\n\n\ndef selectRandrom(tab):\n import random\n ln = len(tab)\n r = random.randrange(0,ln)\n return tab[r]\n\ndef gamePvP():\n while True:\n if isWinner(board, 'X'):\n print('The winner is X! Good game.')\n break\n elif isBoardFull():\n print(\"TIE Game!\")\n break\n playerMove('O')\n paintBoard(board)\n\n if isWinner(board, 'O'):\n print('The winner is O! Good game.')\n break\n elif isBoardFull():\n print(\"TIE Game!\")\n break\n playerMove('X')\n paintBoard(board)\n\ndef gameWithComp():\n while True:\n if isWinner(board, 'X'):\n print('The winner is X! Good luck next time.')\n break\n elif isBoardFull():\n print(\"TIE Game!\")\n break\n playerMove('O')\n paintBoard(board)\n\n if isWinner(board, 'O'):\n print('The winner is O! Good game.')\n break\n elif isBoardFull():\n print(\"TIE Game!\")\n break\n insertLetter('X', compMove())\n paintBoard(board)\n\ndef main():\n game_option = input('Press \\'a\\' to play with computer or any other key to play with another player')\n paintBoard(board)\n if game_option == 'a' or game_option == 'A':\n gameWithComp()\n else:\n gamePvP()\n\n print('Wanna play again?')\n input('Press any button to restart')\n for x in range(10):\n board[x] = ' '\n main()\n\nmain()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"480307518","text":"import pytest\nimport torch\nfrom torch.testing import assert_allclose\n\nfrom asteroid import filterbanks as fb\nfrom asteroid.filterbanks.inputs_and_masks import _masks\nfrom asteroid.filterbanks import inputs_and_masks\n\n\nCOMPLEX_FBS = [\n fb.STFTFB,\n fb.ParamSincFB,\n fb.AnalyticFreeFB\n]\n\n\n@pytest.fixture(scope=\"module\")\ndef fb_config_list():\n keys = ['n_filters', 'kernel_size', 'stride']\n param_list = [\n [512, 256, 128],\n [512, 256, 64],\n [512, 32, 16],\n [512, 16, 8],\n [512, 257, 64],\n [512, 80, 40],\n [513, 80, 40],\n [514, 80, 40]\n ]\n return [dict(zip(keys, values)) for values in param_list]\n\n\n@pytest.fixture(scope=\"module\")\ndef encoder_list(fb_config_list):\n enc_list = []\n for fb_class in COMPLEX_FBS:\n for fb_config in fb_config_list:\n enc_list.append(make_encoder_from(fb_class, fb_config))\n return enc_list\n\n\ndef make_encoder_from(fb_class, config):\n enc = fb.Encoder(fb_class(**config))\n fb_dim = enc.filterbank.n_feats_out\n return enc, fb_dim\n\n\ndef test_mag_mask(encoder_list):\n \"\"\" Assert identity mask works. \"\"\"\n for (enc, fb_dim) in encoder_list:\n tf_rep = enc(torch.randn(2, 1, 16000)) # [batch, freq, time]\n id_mag_mask = torch.ones((1, fb_dim//2, 1))\n masked = inputs_and_masks.apply_mag_mask(tf_rep, id_mag_mask, dim=1)\n assert_allclose(masked, tf_rep)\n\n\ndef test_reim_mask(encoder_list):\n \"\"\" Assert identity mask works. \"\"\"\n for (enc, fb_dim) in encoder_list:\n tf_rep = enc(torch.randn(2, 1, 16000)) # [batch, freq, time]\n id_reim_mask = torch.ones((1, fb_dim, 1))\n masked = inputs_and_masks.apply_real_mask(tf_rep, id_reim_mask, dim=1)\n assert_allclose(masked, tf_rep)\n\n\ndef test_comp_mask(encoder_list):\n \"\"\" Assert identity mask works. 
\"\"\"\n for (enc, fb_dim) in encoder_list:\n tf_rep = enc(torch.randn(2, 1, 16000)) # [batch, freq, time]\n id_complex_mask = torch.cat((torch.ones((1, fb_dim // 2, 1)),\n torch.zeros((1, fb_dim // 2, 1))),\n dim=1)\n masked = inputs_and_masks.apply_complex_mask(tf_rep, id_complex_mask,\n dim=1)\n assert_allclose(masked, tf_rep)\n\n\ndef test_reim(encoder_list):\n for (enc, fb_dim) in encoder_list:\n tf_rep = enc(torch.randn(2, 1, 16000)) # [batch, freq, time]\n assert_allclose(tf_rep, inputs_and_masks.take_reim(tf_rep))\n\n\ndef test_mag(encoder_list):\n for (enc, fb_dim) in encoder_list:\n tf_rep = enc(torch.randn(2, 1, 16000)) # [batch, freq, time]\n batch, freq, time = tf_rep.shape\n mag = inputs_and_masks.take_mag(tf_rep, dim=1)\n assert mag.shape == (batch, freq // 2, time)\n\n\ndef test_cat(encoder_list):\n for (enc, fb_dim) in encoder_list:\n tf_rep = enc(torch.randn(2, 1, 16000)) # [batch, freq, time]\n batch, freq, time = tf_rep.shape\n mag = inputs_and_masks.take_cat(tf_rep, dim=1)\n assert mag.shape == (batch, 3 * (freq // 2), time)\n","sub_path":"tests/filterbanks/inputs_and_masks_test.py","file_name":"inputs_and_masks_test.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"109178894","text":"# rozwiązania zadań po spotkaniu 4\n\n\ndef rysowanie_prostokata():\n \"\"\"Funkcja rysująca prostokąt o zadanych rozmiarach (wysokość i szerokość)\n za pomocą znaków | (bok) - (góra/dół) + (wierzchołek)\"\"\"\n print(\"Rysowanie prostokątu o zadanych parametrach\")\n powtorz = \"T\"\n while powtorz == \"T\":\n bok_a = input(\"Podaj wymiar pierwszego boku: \")\n bok_b = input(\"Podaj wymiar drugiego boku: \")\n gora = \"+\" + int(bok_a) * \"-\" + \"+\"\n srodek = \"\"\n dol = \"+\" + int(bok_a) * \"-\" + \"+\"\n if bok_a.isdigit() is True and bok_b.isdigit() is True: # sprawdzenie czy obie wartości to liczby całkowite\n for i in range(0, int(bok_b)):\n srodek = \"|\" + int(bok_a) * \" \" + \"|\"\n print(gora)\n print(srodek)\n print(dol)\n else:\n blad = \"Wpisałeś nieporpawne wartości. Użyj tylko liczb całkowitych\"\n print(blad)\n powtorz = input(\"\\nCzy chcesz powtórzyć? [T/N]\").upper()\n# ======================================================================================================================\n\n\ndef rysowanie_piramidy():\n \"\"\"Program rysujący piramidę o określonej wysokości, np. dla 3\n #\n ###\n #####\n \"\"\"\n print(\"\\nRysowanie piramidy o określonej wysokości\")\n jeszcze_raz = \"T\"\n while jeszcze_raz == \"T\":\n wysokosc = input(\"Ile pięter ma mieć piramida? \")\n if wysokosc.isdigit() is True:\n wysokosc = int(wysokosc)\n for i in range(0, wysokosc):\n piramida = \" \"*(wysokosc-i-1)+\"#\"+\"#\"*i*2\n print(piramida)\n else:\n blad = f\"{wysokosc} to nie jest liczba całkowita. Jakby miałabym narysować półpiętra? :)\"\n print(blad)\n jeszcze_raz = input(\"\\nCzy chcesz powtórzyć? 
[T/N]\").upper()\n# ======================================================================================================================\n\n\ndef kalkulator_celsjusz_fahenheit():\n \"\"\"Kalkulator do przeliczania stopni Celsjusza na Fahrenheita\"\"\"\n print(\"\\nPrzeliczanie stopni Celsjusza na Fahrenheita\")\n jeszcze_raz = \"T\"\n while jeszcze_raz == \"T\":\n celsjusz_str = input(\"Podaj temperaturę w stopniach Celsjusza: \")\n celsjusz = \"\"\n if celsjusz_str.isdigit() is True: # tutaj powinno być sprawdzenie czy liczba jest float a nie integer\n celsjusz = float(celsjusz_str)\n wzor = 32 + 1.8 * celsjusz # jak podać wzór jako zawartość, a nie jako wynik do zmiennej wynik?\n fahrenheit_temp = round(wzor, 1)\n wynik = f\"{celsjusz} stopni Celsjusza to {fahrenheit_temp} w stopaniach Fahrenheita.\" \\\n f\"Wzór to: 32 + 1.8 * celsjusz \"\n else:\n wynik = f\"{celsjusz} to nie jest poprwana temperatura\"\n print(wynik)\n jeszcze_raz = input(\"\\nCzy chcesz powtórzyć? [T/N]\").upper()\n# =======================================================================================================================\n\n\ndef kalkulator_fahenheit_celsjusz():\n \"\"\"Kalkulator do przeliczania stopni Fahrenheita na Celsjusza\"\"\"\n print(\"\\nPrzeliczanie stopni Fahrenheita na Celsjusza\")\n jeszcze_raz = \"T\"\n while jeszcze_raz == \"T\":\n fahrenheit = input(\"Podaj temperaturę w stopniach Fahrenheita: \")\n if fahrenheit.isdigit() is True: # tutaj powinno być sprawdzenie czy liczba jest float a nie integer\n celsjusz_temp = round(((float(fahrenheit) - 32) / 1.8), 1)\n wynik = f\"{fahrenheit} stopni Fahrenheit to {celsjusz_temp} w stopaniach Celsjusza.\" \\\n f\" Wzór to: (Fahrenheit-32)/1.8\" # jak podać wzór z zmiennej Celsjusz_temp?\n else:\n wynik = f\"{fahrenheit} to nie jest poprwana temperatura\"\n print(wynik)\n jeszcze_raz = input(\"\\nCzy chcesz powtórzyć? [T/N]\").upper()\n# ======================================================================================================================\n\n\ndef cyfra_pierwsza_ostatnia():\n \"\"\"Program, który podaje pierwszą i ostatnią cyfrę podanej liczby\"\"\"\n print(\"\\nPierwsza i ostatnia cyfra podanej liczby\")\n jeszcze_raz = \"T\"\n while jeszcze_raz == \"T\":\n liczba = input(\"Podaj dwolną liczbę całkowitą: \")\n if liczba.isdigit() is False:\n wynik = f\"{liczba} to nie jest liczba całkowita\"\n else:\n wynik = f\"Wpisano liczbę {liczba}. Cyfra pierwsza to {liczba[0]}, a cyfra ostatnia to {liczba[-1]}.\"\n print(wynik)\n jeszcze_raz = input(\"\\nCzy chcesz powtórzyć? 
[T/N]\").upper()\n# ======================================================================================================================\n\n\ndef binara_na_dziesietna(): # to jest do dopracowania\n \"\"\"Przeliczanie liczy binarnej na dziesiętną\"\"\"\n jeszcze_raz = \"T\"\n while jeszcze_raz == \"T\":\n binarna = input(\"Podaj liczbę binarną do przeliczenia na dziesiętną: \")\n ilosc_0 = binarna.count(\"0\")\n ilosc_1 = binarna.count(\"1\")\n dlugosc_liczby = len(binarna)\n licznik = 1\n if ilosc_0 == dlugosc_liczby or ilosc_1 == dlugosc_liczby or ilosc_0+ilosc_1 == dlugosc_liczby:\n wynik = int(binarna[-1]) ** 0 # ostatnia liczba do potęgi zerowej 0**=0, dowolna inna liczba**0=1\n # sprawdzam czy liczba składa się z samych znaków 0 i 1\n while licznik in range(1, len(binarna)):\n dziesietna = int(binarna[-(licznik + 1)]) * 2 ** licznik\n wynik = wynik + dziesietna\n licznik += 1\n wynik_obliczen = f\"Liczba binarna {binarna} to liczba dziesiętna {wynik}\"\n else:\n wynik_obliczen = f\"Liczba {binarna} nie jest liczbą binarną\"\n print(wynik_obliczen)\n jeszcze_raz = input(\"\\nCzy chcesz powtórzyć? [T/N]\").upper()\n# ======================================================================================================================\n\n\ndef rok_przestepny():\n \"\"\"Program do sprawdzania czy podany rok jest rokiem przestępnym.\n Rok przestęny jest podzielny przez 4 i nie jest podzielny przez 100\n lub jest przestępny jeżeli rok jest podzielny przez 400.\"\"\"\n jeszcze_raz = \"T\"\n while jeszcze_raz == \"T\":\n rok = input(\"Podaj rok do sprawdzenia: \")\n if len(rok) != 4 or rok.isdigit() is False: # sprawdza czy są 4 znaki oraz czy znaki są liczbami\n wynik = f\"{rok} to nie jest poprawny rok\"\n elif (int(rok) % 4 == 0 and int(rok) % 100 != 0) or int(rok) % 400 == 0:\n wynik = f\"Rok {rok} jest przestępny\"\n else:\n wynik = f\"Rok {rok} nie jest przestępny\"\n print(wynik)\n jeszcze_raz = input(\"\\nCzy chcesz powtórzyć? [T/N]\").upper()\n# ======================================================================================================================\n\n\ndef kalkulator_pieskie_lata():\n \"\"\"Kalkulator do wyliczania wieku psa.\"\"\"\n jeszcze_raz = \"T\"\n while jeszcze_raz == \"T\":\n wiek_psa_str = input(\"Podaj wiek psa, a ja ci przeliczę na ludzkie lata. Podaj liczbę całkowitą \")\n if wiek_psa_str.isdigit() is False:\n wynik = f\"{wiek_psa_str} to nie jest liczba całkowita\"\n else:\n # obliczanie wieku psa\n wiek_psa_int = int(wiek_psa_str)\n if wiek_psa_int <= 2: # przez pierwsze dwa lata, każdy psi rok to 10,5 ludzkiego roku\n ludzkie_lata = wiek_psa_int * 10.5\n else: # po drugim roku psi rok to 4 ludzkie lata\n ludzkie_lata = int(2 * 10.5) + (wiek_psa_int - 2) * 4\n # sprawdzanie poprawnej odmiany lat dla lat psich\n odmiana_liczby_pies = \"lat\" # w liczbach po cyfrach 0, 1 i od 5 do 9 występuje dopełniacz liczby mnogiej\n if wiek_psa_int == 1:\n odmiana_liczby_pies = \"rok\"\n elif wiek_psa_str[-1] in (\"2\", \"3\", \"4\"): # w liczbach po cyfrach 2,3 i 4 występuje rzeczownik w mianowniku\n odmiana_liczby_pies = \"lata\"\n # sprawdzanie poprawnej odmiany lat dla lat ludzkich\n odmiana_liczby_czlowiek = \"lat\"\n if str(ludzkie_lata)[-1] in (\"2\", \"3\", \"4\"):\n odmiana_liczby_czlowiek = \"lata\"\n wynik = f\"{wiek_psa_int} {odmiana_liczby_pies} dla psa to ludzkie {ludzkie_lata} {odmiana_liczby_czlowiek}\"\n print(wynik)\n jeszcze_raz = input(\"\\nCzy chcesz powtórzyć? 
[T/N]\").upper()\n# ======================================================================================================================\n\n\ndef odczyty_temperatury():\n \"\"\"Zmienna dane zawiera 24 odczyty temperatury z 24 godzin.\n Każde 4 znaki to jeden odczyt w setnych stopni Celsjusza, tzn \"2150\" to 21.50°C\n Pomiary są dokonane o pełnych godzinach od 00:00 do 23:00.\n Dla odczytów niższych niż lub równych 20°C dodawany jest \"!\"\n Dla odczytów niższych niż lub równych 18,5°C dodany jeest dodatkowy wykrzyknik\"\"\"\n jeszcze_raz = \"T\"\n while jeszcze_raz == \"T\":\n dane = \"215021482120211921002076207620502065202020152010200520002001199319901950183417501744186019462010\"\n print(\"(...)\")\n for odczyt in range(0, 24): # gdyby string nie był znanej długości wtedy range(0, len(dane), 4)\n poczatek_zakresu = odczyt * 4 # co 4 cyfry ropzoczyna się nowy odczyt\n srodek_zakresu = poczatek_zakresu + 2\n koniec_zakresu = poczatek_zakresu + 4\n temperatura = float(f\"{dane[poczatek_zakresu:srodek_zakresu]}.{dane[srodek_zakresu:koniec_zakresu]}\")\n temp_float = \"%0.2f\" % temperatura # wyświetlenie liczby do 2 miejsc po przecinku\n tab = \"\"\n odczyt_format = odczyt\n if temperatura <= 18.5:\n tab = \"\\t!!\"\n elif temperatura <= 20:\n tab = \"\\t!\"\n if odczyt <= 9:\n odczyt_format = f\"0{odczyt}\" # wyświetlenie dodatkowego 0 z przodu dla cyfr do 10\n wiersz_string = f\"{odczyt_format}:00\\t{temp_float}\\u00b0C{tab}\" # \\u00b0 to znak unicode stopni Celsjusza\n print(wiersz_string)\n print(\"(...)\")\n jeszcze_raz = input(\"\\nCzy chcesz powtórzyć? [T/N]\").upper()\n# ======================================================================================================================\n\n\ndef rozmieniarka_pieniedzy():\n \"\"\"Program przyjmuje kwotę w parametrze i wylicza jak rozmienić to na monety: 5, 2, 1, 0.5, 0.2, 0.1\n wydając ich jak najmniej.\"\"\"\n jeszcze_raz = \"T\"\n while jeszcze_raz == \"T\":\n monety = [5, 2, 1, 0.5, 0.2, 0.1]\n kwota = float(input(\"Ile chcesz rozmienić? Możesz podać kwotę w zaokrągleniu do 10 groszy \"))\n for moneta in monety:\n rozmienione = kwota//moneta # znak // dzieli obcniając resztę (nazwa: floor division lub integer division)\n kwota = kwota - rozmienione*moneta # to jest reszta która została do rozmienienia\n do_wydania = f\"Monet {moneta} zł: {rozmienione}\"\n print(do_wydania)\n reszta = round(kwota, 2)\n if reszta != 0:\n reszta = f\"Nie podałeś liczby zaokrąglonej do 10 gr, więc reszta {reszta} zł jest dla mnie\"\n print(reszta)\n jeszcze_raz = input(\"\\nCzy chcesz powtórzyć? [T/N]\").upper()\n# ======================================================================================================================\n\n\ndef rysowanie_listy():\n \"\"\"Program który przyjmie w parametrze dowolną listę np ['col1', 'col2', 'col3'] i wyświetla:\n +------+------+------+\n | col1 | col2 | col3 |\n +------+------+------+\n Szerokość kolumn jest zawsze równa bez względów na zawartość, tekst wyrównany do lewej.\n Maksymalna szerokość kolumny to 30 znaków, dłuże teksty są przycinane i kończą sie trzema kropkami.\n Dla list zagnieżdżonych narysuje się tabela z odpowiednią ilością wierszy i kolumn\"\"\"\n jeszcze_raz = \"T\"\n while jeszcze_raz == \"T\":\n wersja = input(\"Którą wersję chcesz zobaczyć: jednowierszowa lista czy matryca? 
W/M\").capitalize()\n ramka = \"+\" + 30 * \"-\"\n if wersja == \"W\": # wersja z listą w jednym wierszu\n lista = ['tutaj jest komórka 1', 'tutaj jest komórka 2', 'tutaj jest komórka 3 - ostatnia na liście']\n dlugosc_listy = len(lista)\n srodek_lista = \"\"\n i = 0\n for opis in lista:\n if len(opis) > 30:\n srodek = \"|\" + opis[:27] + \"...\"\n else:\n srodek = \"|\" + opis + (30 - len(lista[i])) * \" \"\n srodek_lista = srodek_lista + srodek\n i += 1\n print(ramka * dlugosc_listy + \"+\")\n print(srodek_lista + \"|\")\n print(ramka * dlugosc_listy + \"+\")\n if wersja == \"M\": # wersja z matrycą dla list zagnieżdżonych\n lista = [[\"opis komórki 00 bardzo długi opis\", \"opis komóki 01\", \"opis komórki 02\"],\n [\"opis komórki 10\", \"opis komóki 11\", \"opis komórki 12\"],\n [\"opis komórki 20\", \"opis komóki 21\", \"opis komórki 22\"]]\n j = 0\n for element in lista:\n i = 0\n srodek_lista = \"\"\n for opis in element:\n if len(opis) > 30:\n srodek = \"|\" + opis[:27] + \"...\"\n else:\n srodek = \"|\" + opis + (30 - len(opis)) * \" \"\n srodek_lista = srodek_lista + srodek\n i += 1\n if j == 0:\n print(ramka * i + \"+\")\n print(srodek_lista + \"|\")\n print(ramka * i + \"+\")\n elif j >= 1:\n print(srodek_lista + \"|\")\n print(ramka * i + \"+\")\n j += 1\n jeszcze_raz = input(\"\\nCzy chcesz powtórzyć? [T/N]\").upper()\n# ======================================================================================================================\n\n\nlista_programów = [\"Witaj w Multitool. Wybierz program który cię interesuje: \",\n \"1) Rysowanie prostokątu o zadanych parametrach\",\n \"2) Rysowanie piramidy o określonej wysokości\",\n \"3) Przeliczanie C->F\",\n \"4) Przeliczanie F->C\",\n \"5) Pierwsza i ostatnia cyfra z liczby\",\n \"6) Przeliczanie liczy binarnej na dziesiętną\",\n \"7) Sprawdenie czy rok jest przestępny\",\n \"8) Kalkulator do wyliczania wieku psa\",\n \"9) Odczytywanie_temperatury\",\n \"10) Rozmienianie pieniędzy\",\n \"11) Rysowanie listy\",\n \"R) Wybierz program losowo bo nie wiem czego szukam:)\",\n \"X) Wyjście z programu\"]\nzapytaj_ponownie = \"T\"\nwhile zapytaj_ponownie == \"T\":\n print(f\"\\n\".join(i for i in lista_programów) + \"\\n\") # rozdzielam elementy listy nową linią i kończę pustą linią\n wybor = input(\"Twój wybór: \")\n if wybor.isalpha():\n wybor_litera = wybor.upper()\n if wybor_litera == \"R\":\n from random import randint\n los = randint(1, 11) # losowy wybór programu\n wybor = str(los) # zamiana na str, ponieważ funkcja isdigit() w kolejnym if działa tylko dla str\n info = f\"Wybrałeś losowy wybór programu. Przejdziesz teraz do zadania {wybor}\\t\"\n elif wybor_litera == \"X\":\n info = \"Koniec. 
Dzięki, że tu zajrzałeś 😉\"\n print(info)\n break\n else:\n info = \"Twój wybór jest niepoprawny\"\n print(info)\n if wybor.isdigit():\n wybor_liczba = int(wybor)\n if wybor_liczba == 1:\n rysowanie_prostokata()\n elif wybor_liczba == 2:\n rysowanie_piramidy()\n elif wybor_liczba == 3:\n kalkulator_celsjusz_fahenheit()\n elif wybor_liczba == 4:\n kalkulator_fahenheit_celsjusz()\n elif wybor_liczba == 5:\n cyfra_pierwsza_ostatnia()\n elif wybor_liczba == 6:\n binara_na_dziesietna()\n elif wybor_liczba == 7:\n rok_przestepny()\n elif wybor_liczba == 8:\n kalkulator_pieskie_lata()\n elif wybor_liczba == 9:\n odczyty_temperatury()\n elif wybor_liczba == 10:\n rozmieniarka_pieniedzy()\n elif wybor_liczba == 11:\n rysowanie_listy()\n else:\n info = \"Twój wybór jest niepoprawny\"\n print(info)\n zapytaj_ponownie = input(\"\\nCzy chcesz uruchomić inny program? [T/N]\").upper()\n info = \"Koniec. Dzięki, że tu zajrzałeś 😉\"\n print(info)\n","sub_path":"day4/day4_hom.py","file_name":"day4_hom.py","file_ext":"py","file_size_in_byte":16874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"208771222","text":"import pandas as pd\nimport numpy as np\n\n\ndef generator(data, lookback, delay, min_index, max_index, shuffle=False, batch_size=128, step=6):\n if max_index is None:\n max_index = len(data) - delay - 1\n i = min_index + lookback\n while 1:\n if shuffle:\n rows = np.random.randint(min_index + lookback, max_index, size=batch_size)\n else:\n if i + batch_size >= max_index:\n i = min_index + lookback\n rows = np.arange(i, min(i + batch_size, max_index))\n i += len(rows)\n samples = np.zeros((len(rows), lookback // step, data.shape[-1]))\n targets = np.zeros((len(rows),))\n for j, row in enumerate(rows):\n indices = range(rows[j] - lookback, rows[j], step)\n samples[j] = data[indices]\n targets[j] = data[rows[j] + delay][1]\n yield samples, targets\n\n\nlookback = 10\nstep = 1\ndelay = 5\nbatch_size = 5\n\na = np.arange(50)\nb = np.arange(100, 150)\ndata = pd.DataFrame({'a':a, 'b':b})\n\ntrain_gen = generator(data.values, lookback=lookback, delay=delay, min_index=0, max_index= len(data)-1,\n shuffle=True, step=step, batch_size=batch_size)\n\nfor i in train_gen:\n print(i)\n","sub_path":"common_NN.py","file_name":"common_NN.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"603581208","text":"import io\nimport os\nimport os.path as osp\nimport shutil\nimport warnings\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmcv.fileio import FileClient\nfrom torch.nn.modules.utils import _pair\n\nfrom ...utils import get_random_string, get_shm_dir, get_thread_id\nfrom ..registry import PIPELINES\n\nfrom .loading import SampleFrames\n\n@PIPELINES.register_module()\nclass SampleWeVideoFrames(SampleFrames):\n\n def __init__(self, clip_len, frame_interval=2, test_mode=False):\n\n super().__init__(clip_len, frame_interval, test_mode=test_mode)\n\n def _get_clips(self, center_index, skip_offsets, shot_info):\n start = center_index - (self.clip_len // 2) * self.frame_interval\n end = center_index + ((self.clip_len + 1) // 2) * self.frame_interval\n frame_inds = list(range(start, end, self.frame_interval))\n if not self.test_mode:\n frame_inds = frame_inds + skip_offsets\n frame_inds = np.clip(frame_inds, shot_info[0], shot_info[1] - 1)\n return frame_inds\n\n def __call__(self, results):\n fps = results['fps']\n timestamp = results['timestamp']\n 
timestamp_start = results['timestamp_start']\n timestamp_end = results['timestamp_end']\n shot_info = results['shot_info']\n\n num_frames = fps * (timestamp_end - timestamp_start)\n if num_frames < self.frame_interval * self.clip_len + 1: # +1 for safety\n return None\n\n if timestamp:\n timestamp = int(timestamp)\n center_index = fps * (timestamp - timestamp_start) + 1\n else:\n center_index = np.random.randint(\n low=self.clip_len//2*self.frame_interval,\n high=max(num_frames-self.clip_len//2*self.frame_interval, self.clip_len//2*self.frame_interval+1)\n )\n skip_offsets = np.random.randint(\n -self.frame_interval // 2, (self.frame_interval + 1) // 2,\n size=self.clip_len)\n frame_inds = self._get_clips(center_index, skip_offsets, shot_info)\n\n results['frame_inds'] = np.array(frame_inds, dtype=np.int)\n results['clip_len'] = self.clip_len\n results['frame_interval'] = self.frame_interval\n results['num_clips'] = 1\n results['crop_quadruple'] = np.array([0, 0, 1, 1], dtype=np.float32)\n return results\n\n def __repr__(self):\n repr_str = (f'{self.__class__.__name__}('\n f'clip_len={self.clip_len}, '\n f'frame_interval={self.frame_interval}, '\n f'test_mode={self.test_mode})')\n return repr_str\n\n","sub_path":"mmaction/datasets/pipelines/wevideo_loading_factory.py","file_name":"wevideo_loading_factory.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"234511062","text":"import uuid\nfrom .utils import *\nfrom pyspark.ml.feature import *\nimport pandas as pd\nimport numpy as np\nimport pyspark.sql.types as spk_type\nimport pyspark.sql.types as t\nimport pyspark.sql.functions as spk_func\nimport pyspark.sql.functions as f\nfrom pyspark.sql import *\nfrom pyspark import *\nimport os\nimport sys\nfrom timeit import default_timer as timer\nimport logging\nimport shutil\n\n\nclass Encoder:\n def __init__(self, proc):\n self.op_name = \"Encoder\"\n self.uuid = proc.uuid\n self.tmp_id = proc.tmp_id\n self.path_prefix = proc.path_prefix\n self.current_path = proc.current_path\n self.dicts_path = proc.dicts_path\n self.spark = proc.spark\n self.tmp_materialzed_list = []\n\n def transform(self, train, valid, train_only=True):\n raise NotImplementedError(\"This is base Encoder class\")\n\n def materialize(self, df, df_name=\"materialized_tmp\"):\n tmp_id = self.tmp_id\n self.tmp_id += 1\n save_path = \"\"\n if df_name == \"materialized_tmp\":\n save_path = \"%s/%s/tmp/%s-%s-%d\" % (\n self.path_prefix, self.current_path, df_name, self.uuid, tmp_id)\n self.tmp_materialzed_list.append(save_path)\n else:\n save_path = \"%s/%s/%s\" % (self.path_prefix,\n self.current_path, df_name)\n df.write.format('parquet').mode('overwrite').save(save_path)\n return self.spark.read.parquet(save_path)\n\n\nclass TargetEncoder(Encoder):\n def __init__(self, proc, x_col_list, y_col_list, out_col_list, out_name, out_dtype=None, y_mean_list=None, smooth=20, seed=42):\n super().__init__(proc)\n self.op_name = \"TargetEncoder\"\n self.x_col_list = x_col_list\n self.y_col_list = y_col_list\n self.out_col_list = out_col_list\n self.out_dtype = out_dtype\n self.out_name = out_name\n self.y_mean_list = y_mean_list\n self.seed = seed\n self.smooth = smooth\n self.expected_list_size = len(y_col_list)\n if len(self.out_col_list) < self.expected_list_size:\n raise ValueError(\"TargetEncoder __init__, input out_col_list should be same size as y_col_list\") \n if y_mean_list != None and len(self.y_mean_list) < 
self.expected_list_size:\n raise ValueError(\"TargetEncoder __init__, input y_mean_list should be same size as y_col_list\") \n\n def transform(self, df):\n x_col = self.x_col_list\n cols = ['fold', x_col] if isinstance(x_col, str) else ['fold'] + x_col\n agg_per_fold = df.groupBy(cols)\n agg_all = df.groupBy(x_col)\n\n per_fold_list = []\n all_list = []\n\n for i in range(0, self.expected_list_size):\n y_col = self.y_col_list[i]\n per_fold_list.append(f.count(y_col).alias(f'count_{y_col}'))\n per_fold_list.append(f.sum(y_col).alias(f'sum_{y_col}'))\n all_list.append(f.count(y_col).alias(f'count_all_{y_col}'))\n all_list.append(f.sum(y_col).alias(f'sum_all_{y_col}'))\n\n agg_per_fold = agg_per_fold.agg(*per_fold_list)\n agg_all = agg_all.agg(*all_list)\n agg_per_fold = agg_per_fold.join(agg_all, x_col, 'left')\n\n for i in range(0, self.expected_list_size):\n y_col = self.y_col_list[i]\n out_col = self.out_col_list[i]\n out_dtype = self.out_dtype\n y_mean = self.y_mean_list[i] if self.y_mean_list != None else None\n \n if y_mean is None:\n y_mean = np.array(df.groupBy().mean(y_col).collect())[0][0]\n mean = float(y_mean)\n smooth = self.smooth\n\n # print(agg_per_fold.dtypes)\n\n # prepare for agg_per_fold\n agg_per_fold = agg_per_fold.withColumn(\n f'count_all_{y_col}', f.col(f'count_all_{y_col}')-f.col(f'count_{y_col}'))\n agg_per_fold = agg_per_fold.withColumn(\n f'sum_all_{y_col}', f.col(f'sum_all_{y_col}')-f.col(f'sum_{y_col}'))\n agg_per_fold = agg_per_fold.withColumn(\n out_col,\n (f.col(f'sum_all_{y_col}') + f.lit(mean) * f.lit(smooth))/(f.col(f'count_all_{y_col}') + f.lit(smooth)))\n agg_all = agg_all.withColumn(\n out_col,\n (f.col(f'sum_all_{y_col}') + f.lit(mean) * f.lit(smooth))/(f.col(f'count_all_{y_col}') + f.lit(smooth)))\n if out_dtype is not None:\n agg_per_fold = agg_per_fold.withColumn(\n out_col, f.col(out_col).cast(out_dtype))\n agg_all = agg_all.withColumn(\n out_col, f.col(out_col).cast(out_dtype))\n agg_per_fold = agg_per_fold.drop(\n f'count_all_{y_col}', f'count_{y_col}', f'sum_all_{y_col}', f'sum_{y_col}')\n agg_all = agg_all.drop(f'count_all_{y_col}', f'sum_all_{y_col}')\n return (self.materialize(agg_per_fold, \"%s/train/%s\" % (self.dicts_path, self.out_name)),\n self.materialize(agg_all, \"%s/test/%s\" % (self.dicts_path, self.out_name)))\n\n\nclass CountEncoder(Encoder):\n def __init__(self, proc, x_col, out_col, seed=42):\n super().__init__(proc)\n self.op_name = \"CountEncoder\"\n self.x_col = x_col\n self.out_col = out_col\n self.seed = seed\n\n def transform(self, train, valid, train_only=True):\n x_col = self.x_col\n out_col = self.out_col\n\n cols = [x_col] if isinstance(x_col, str) else x_col\n agg_all = train.groupby(cols).count(\n ).withColumnRenamed('count', out_col)\n agg_test = valid.groupby(cols).count().withColumnRenamed(\n 'count', out_col+'_valid')\n\n agg_test_size = agg_test.count()\n if agg_test_size > 30000000:\n agg_all = agg_all.join(agg_test.hint(\n 'shuffle_hash'), cols, how='left')\n else:\n agg_all = agg_all.join(f.broadcast(agg_test), cols, how='left')\n agg_all = agg_all.fillna(0, out_col+'_valid')\n agg_all = agg_all.withColumn(\n out_col, f.col(out_col)+f.col(out_col+'_valid'))\n agg_all = agg_all.drop(out_col+'_valid')\n agg_all.cache()\n\n train_out = (cols, self.materialize(\n agg_all, \"train/%s\" % out_col), 0)\n if train_only == False:\n valid_out = (cols, self.materialize(\n agg_all, \"valid/%s\" % out_col), 0)\n else:\n valid_out = ()\n return (train_out, valid_out)\n\n\nclass FrequencyEncoder(Encoder):\n def 
__init__(self, proc, x_col, out_col, seed=42):\n super().__init__(proc)\n self.op_name = \"FrequencyEncoder\"\n self.x_col = x_col\n self.out_col = out_col\n self.seed = seed\n\n def transform(self, train, valid, train_only=True):\n x_col = self.x_col\n out_col = self.out_col\n length_train = train.count()\n length_valid = valid.count()\n\n cols = [x_col] if isinstance(x_col, str) else x_col\n agg_all_train = train.groupby(cols).count(\n ).withColumnRenamed('count', out_col)\n agg_all_train = agg_all_train.withColumn(out_col, f.col(\n out_col).cast(spk_type.IntegerType()))\n agg_all_train = agg_all_train.withColumn(\n out_col, f.col(out_col)*1.0/length_train)\n agg_all_train = agg_all_train.withColumn(out_col, f.col(\n out_col).cast(spk_type.FloatType()))\n\n agg_all_valid = valid.groupby(cols).count(\n ).withColumnRenamed('count', out_col)\n agg_all_valid = agg_all_valid.withColumn(out_col, f.col(\n out_col).cast(spk_type.IntegerType()))\n agg_all_valid = agg_all_valid.withColumn(\n out_col, f.col(out_col)*1.0/length_valid)\n agg_all_valid = agg_all_valid.withColumn(out_col, f.col(\n out_col).cast(spk_type.FloatType()))\n\n train_out = (cols, self.materialize(\n agg_all_train, \"train/%s\" % out_col), 0)\n if train_only == False:\n valid_out = (cols, self.materialize(\n agg_all_valid, \"valid/%s\" % out_col), 0)\n else:\n valid_out = ()\n return (train_out, valid_out)\n","sub_path":"pyrecdp/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":8158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"433138853","text":"import zmq\nfrom time import sleep\n\n# Socket to talk to clients.\ncontext = zmq.Context()\nsocket = context.socket(zmq.PUB)\nsocket.bind(\"tcp://*:6666\")\n\n# Publisher thread loop.\nwhile True:\n socket.send_multipart([b\"PRESSURE\", b\"10000.00\"])\n sleep(1)\n\n# We never get here but clean up anyhow.\nsocket.close()\ncontext.term()\n","sub_path":"demo_02/publisher2.py","file_name":"publisher2.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"2831193","text":"class Engine():\n \"\"\" Handles all speed and velocity calculations \"\"\"\n def __init__(self):\n self.acceleration = 12\n self.angle = 0\n self.vel_x = 0.0\n self.vel_y = 0.0\n self.max_vel_x = 500\n self.max_vel_y = 500\n\n def accelerate(self, dt, directions, moving):\n # calculate velocity on x and y based on the angle\n dx, dy = directions\n self.vel_x += dx * self.acceleration\n self.vel_y += dy * self.acceleration\n if abs(self.vel_x) > self.max_vel_x:\n self.vel_x = self.max_vel_x * dx\n if abs(self.vel_y) > self.max_vel_y:\n self.vel_y = self.max_vel_y * dy\n\n # apply friction\n if not moving:\n self.vel_x *= 0.50\n self.vel_y *= 0.50\n","sub_path":"src/game_objects/components/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"484401810","text":"import random\n\nimport requests\nimport vk_api\nfrom config import *\n\n\ndef write_msg(user_id, text):\n vk_bot.method('messages.send', {'user_id': user_id, 'message': text, 'random_id': random.randint(0, 1000)})\n\n\nvk_bot = vk_api.VkApi(token=TOKEN)\nlong_poll = vk_bot.method('messages.getLongPollServer', {'need_pts': 1, 'lp_version': 3})\nserver, key, ts = long_poll['server'], long_poll['key'], long_poll['ts']\nprint(\"готов к работе\")\n# + 
str(long_poll))\n\nwhile True:\n long_poll = requests.get(\n 'https://{server}?act={act}&key={key}&ts={ts}&wait=2000'.format(server=server,\n act='a_check',\n key=key,\n ts=ts)).json()\n update = long_poll['updates']\n if update[0][0] == 4:\n if update[0][6] == 'стратегия':\n print(update)\n user_id = update[0][3]\n user_name = vk_bot.method('users.get', {'user_ids': user_id})\n write_msg(user_id, 'поиграй в League of Legends')\n user_id = update[0][3]\n user_name = vk_bot.method('users.get', {'user_ids': user_id})\n write_msg(user_id, 'привет , ' + (user_name[0]['first_name']) + 'какой жанр игр предпочитаешь?') # сообщение пользователю\n print(str(user_name[0]['first_name']) + ' ' +\n str(user_name[0]['last_name']) + ' написал(а) боту - ' + str(update[0][6])) # сообщение нам\n # Меняем ts для следующего запроса\n ts = long_poll['ts']\n","sub_path":"run_bot.py","file_name":"run_bot.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"490780141","text":"from typing import List\n\n\nclass Solution:\n def numUniqueEmails(self, emails: List[str]) -> int:\n result = set()\n for email in emails:\n email2 = []\n for i in range(len(email)):\n ch = email[i]\n if ch == \".\":\n continue\n if ch == \"+\" or ch == \"@\":\n email2.extend(email[email.index(\"@\"):])\n break\n email2.append(ch)\n result.add(\"\".join(email2))\n return len(result)","sub_path":"leetcode/p0929_unique_email_addresses/my_attempt.py","file_name":"my_attempt.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"126282694","text":"#!/usr/bin/python3\n# Bodoki-Halmen Zsolt\n# bzim1700\n# 531/1\n\nimport tree as t\nimport struct as s\nfrom argparse import ArgumentParser\nfrom pathlib import Path\n\ndef zero_pad(filename):\n byte = filename.encode()\n n = len(byte)\n if n < 64:\n pad = b'\\x00'*(64-n)\n byte += pad\n elif n > 64:\n splitted = filename.split('.')\n extension = splitted[len(splitted) - 1]\n m = 64 - len(extension) - 1\n filename = filename[0:m] + '.' 
+ extension\n byte = filename.encode()\n return byte\n\ndef strip_padding(byte):\n return byte.decode().split('\\x00')[0]\n\ndef loading(verbose, i, n):\n if verbose:\n percent = i/(n - 1)\n current = int(percent*30)\n string = '['\n for x in range(30):\n if x <= current:\n string += '*'\n else:\n string += ' '\n string += ']'\n print(string, end='')\n if i != (n-1):\n print('\\b'*32, end='')\n else:\n print()\n\ndef log(verbose, message):\n if verbose:\n print(message)\n\ndef encode(output_file, input_file, verbose):\n log(verbose, 'encoding {} as {}'.format(input_file, output_file))\n \n root = t.make_tree([i for i in range(256)])\n \n f = open(output_file, 'wb')\n\n log(verbose, 'encodeing file name')\n f.write(zero_pad(input_file))\n \n log(verbose, 'opening file')\n g = open(input_file, 'rb')\n \n log(verbose, 'encoding')\n i = 0\n n = Path(input_file).stat().st_size\n code = ''\n while True:\n loading(verbose, i, n)\n byte = g.read(1)\n if byte == b'':\n break\n byte = s.unpack('B', byte)[0]\n code += root.encode_character(byte)\n while len(code) >= 8:\n binary = int(code[0:8], 2)\n f.write(s.pack('B', binary))\n code = code[8:]\n i += 1\n g.close()\n if len(code) > 0:\n code += '0'*(8-len(code))\n binary = int(code, 2)\n f.write(s.pack('B', binary))\n f.close()\n\ndef decode(input_file, verbose):\n log(verbose, 'decoding {}'.format(input_file))\n \n root = t.make_tree([i for i in range(256)])\n\n log(verbose, 'opening files')\n f = open(input_file, 'rb')\n filename = strip_padding(f.read(64))\n g = open(filename, 'wb')\n\n log(verbose, 'decoding')\n i = 0\n n = Path(input_file).stat().st_size\n while True:\n loading(verbose, i, n)\n byte = f.read(1)\n if byte == b'':\n break\n code = '{:08b}'.format(s.unpack('B', byte)[0])\n message = root.decode(code)\n for m in message:\n g.write(s.pack('B', m))\n i += 1\n g.close()\n f.close()\n\n################################################################################\n\nif __name__ == '__main__':\n parser = ArgumentParser(description='Adaptive Huffman Encoder/Decoder')\n parser.add_argument('-d', \n '--decode', \n nargs=1,\n metavar='file',\n help='decode file')\n parser.add_argument('-e', \n '--encode', \n nargs=2, \n metavar='file',\n help='encode')\n parser.add_argument('-v', \n '--verbose', \n action='store_true')\n\n args = parser.parse_args()\n if args.encode == None and args.decode == None:\n parser.parse_args(['-h'])\n elif args.encode != None and args.decode == None:\n encode(args.encode[0], args.encode[1], args.verbose)\n elif args.decode != None and args.encode == None:\n decode(args.decode[0], args.verbose)\n","sub_path":"lab03/ahm.py","file_name":"ahm.py","file_ext":"py","file_size_in_byte":3688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"43842078","text":"from FAdo.reex import *\nfrom FAdo.fa import *\nfrom FAdo.fio import *\nfrom parseTree import *\nimport copy\n\ndef isPrune(s, examples):\n return isPDead(s,examples) or isNDead(s, examples)\n\ndef isPDead(s, examples):\n s = copy.deepcopy(s)\n s.spreadAll(KleenStar(Or(Character('0'),Character('1'))))\n it = iter(examples.getPos())\n for i in it:\n if not str2regexp(repr(s)).evalWordP(i):\n return True\n return False\n\ndef isNDead(s, examples):\n s = copy.deepcopy(s)\n #s.spreadAll(Epsilon())\n s.spreadNp()\n if not bool(repr(s).strip()):\n return False\n it = iter(examples.getNeg())\n for i in it:\n if str2regexp(repr(s)).evalWordP(i):\n return True\n return False\n\n\ndef isRedundant(s):\n s = unroll(s)\n it = 
iter(examples.getPos())\n for i in it:\n if not s.evalWordP(i):\n return True\n return False\n\n\ndef split(s) :\n before = [s]\n after = []\n i = 0\n idx = s.find(\"+\")\n if idx != 0:\n x = s.rfind(\"(\", 0, idx)\n split(s)\n split(s)\n else:\n return s\n\n\ndef unroll(s):\n i = 0\n idx = s.find(\"*\")\n while idx != -1:\n x = s.rfind(\"(\", 0, idx)\n mul = s.rfind(\"*\", 0, idx)\n if mul != -1 and x < mul:\n x = s.rfind(\"(\", 0, x - 1)\n x = s.rfind(\"(\", 0, x - 1)\n x = s.rfind(\"(\", 0, x - 1)\n\n s = s[:idx] + s[x:idx] + s[x:idx] + s[idx:]\n i = idx + 2 * (idx - x) + 1\n idx = s.find(\"*\", i)\n return s\n","sub_path":"prune.py","file_name":"prune.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"391844780","text":"from server import setup_app\nimport pytest\npytest_plugins = 'aiohttp.pytest_plugin'\n\nfrom faker import Faker\nfrom random import randint\nimport logging\nimport json\n\nlogging.basicConfig(level=logging.DEBUG)\nlog = logging.getLogger(__name__)\nfaker = Faker() # fake user data generator\n\n\n@pytest.fixture\ndef client(loop, test_client):\n return loop.run_until_complete(test_client(setup_app))\n\n\nasync def jsonreq(client, route_name, data=None, query=None, **parts):\n \"\"\" A little helper to make reverse URL mappings less painful in aiohttp 0.21+ \"\"\"\n\n app = client.app\n resource = app.router[route_name]\n method = resource._routes[0].method\n if 'formatter' in resource.get_info():\n uri = resource.url(parts=parts, query=query)\n else:\n uri = resource.url(query=query)\n log.debug(\"URI resolved to %s\", uri)\n json_data = json.dumps(data)\n resp = await client.request(method, uri, data=json_data)\n assert resp.status == 200\n json_resp = await resp.json()\n return json_resp\n\n\ndef gen_fake_user():\n name = faker.name()\n fname, lname = name.split(maxsplit=1)\n emails = [faker.email() for _ in range(randint(2, 3))]\n data = {'fname': fname, 'lname': lname, 'emails': emails}\n log.debug(\"new fake user: %s\", data)\n return data\n\n\nasync def test_basic_operations(client):\n # create new person\n person1 = await jsonreq(client, 'person_put', gen_fake_user())\n\n # can we retrieve what we just created?\n person2 = await jsonreq(client, 'person_get', id=person1['id'])\n assert person2 == person1, \"created and retrevied objects do not match\"\n\n # okay, add some new person\n person3 = await jsonreq(client, 'person_put', gen_fake_user())\n\n # create a few group\n group1 = await jsonreq(client, 'group_put', {\"name\": \"some group\"})\n group2 = await jsonreq(client, 'group_put', {\"name\": \"nother group\"})\n\n # create address_book\n address_book = await jsonreq(client, 'addressbook_put', {\"name\": \"some address_book\"})\n\n # add a person to the address_book\n address_book = await jsonreq(client, 'addressbook_add', person1, id=address_book['id'], field=\"people\")\n assert person1['id'] in address_book['people']\n assert person3['id'] not in address_book['people']\n\n # get a person with a list of groups\n person1 = await jsonreq(client, 'person_get', gen_fake_user(), id=person1['id'])\n assert address_book['id'] in person1['address_books']\n\n # add a group to the address_book\n address_book = await jsonreq(client, 'addressbook_add', group1, id=address_book['id'], field=\"groups\")\n assert group1['id'] in address_book['groups']\n assert group2['id'] not in address_book['groups']\n\n\nasync def test_search_by_name(client):\n # check that we can search 
by first name\n person = await jsonreq(client, 'person_put', dict(fname=\"_fname\", lname=\"_lname\", emails=[\"test@example.com\"]))\n people = await jsonreq(client, 'person-find', query=dict(fname=\"_fname\"))\n assert len(people) == 1 and person in people\n\n # check search by last name\n people = await jsonreq(client, 'person-find', query=dict(lname=\"_lname\"))\n assert len(people) == 1 and person in people\n\n # check search by first and last names\n people = await jsonreq(client, 'person-find', query=dict(fname=\"_fname\", lname=\"_lname\"))\n assert len(people) == 1 and person in people\n\n\nasync def test_search_by_email(client):\n person = await jsonreq(client, 'person_put', dict(fname=\"somename\", lname=\"somesurname\", emails=[\"another@example.com\"]))\n people = await jsonreq(client, 'person-find-by-email', query=dict(email=\"another@example.com\"))\n assert len(people) == 1 and person in people\n","sub_path":"server_test.py","file_name":"server_test.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"493684371","text":"import asyncio,datetime,multiprocessing,json,traceback\r\nfrom elasticsearch import Elasticsearch\r\nfrom utils.es import *\r\nfrom utils.ldap import *\r\nfrom urllib import request\r\nfrom conf import ldap_config,es_config,sync_objectClass\r\n\r\ndef request_data(gen):\r\n #request data\r\n try:\r\n data=next(gen)\r\n return data\r\n except StopIteration:\r\n return 0\r\n except ValueError:\r\n return 1\r\n\r\nasync def index(loop,keys,gen,objectClass,es):\r\n print(objectClass+' : start a coroutine')\r\n count=1\r\n while True:\r\n # await for data\r\n infolist = await loop.run_in_executor(None,request_data,gen)\r\n if infolist == 0:\r\n #StopIteration,no more data\r\n print(objectClass+' : job finished at : '+str(datetime.datetime.now()))\r\n break\r\n elif infolist == 1:\r\n #generator ValueError,means generator is now generating data,sleep 10s and switch to another coroutine\r\n await asyncio.sleep(10)\r\n else:\r\n print(objectClass+' : now indexing page '+str(count))\r\n count+=1\r\n #index data by bulk,this will take a little time\r\n bulk_insert(es,infolist,keys,objectClass)\r\n\r\nasync def run(loop,keys,gen,objectClass,es):\r\n tasks=[]\r\n # 3 coroutine by default\r\n for i in range(3):\r\n tasks.append(asyncio.ensure_future(index(loop,keys,gen,objectClass,es),loop=loop))\r\n await asyncio.gather(*tasks)\r\n\r\ndef main(objectClass):\r\n es = Elasticsearch(es_config['ip'], port=es_config['port'])\r\n # create ldap connection\r\n ldap_conn = connect(**ldap_config)\r\n try:\r\n #if the index is existing by detect elasticsearch\r\n url='http://'+str(es_config['ip'][0])+':'+str(es_config['port'])+'/'+str(objectClass)+'/_mapping?'\r\n r=request.urlopen(url)\r\n re=json.loads(bytes.decode(r.read()))\r\n #get fields from elasticsearch for filter ldap\r\n keys=list(re[objectClass]['mappings']['data']['properties'].keys())\r\n try:\r\n keys.remove('doc')\r\n except:\r\n pass\r\n print('useful keys : {}'.format(str(keys)))\r\n except:\r\n # get first page info,20000 data by default\r\n keygen = page_data(ldap_conn,ldap_config['basedn'], objectClass,page_size=20000)\r\n first_page = next(keygen)\r\n # get the useful ldap items,this item list will be used to create elasticsearch mapping and filter ldap data\r\n keys = get_keys(first_page,objectClass)\r\n print('useful keys : {}'.format(str(keys)))\r\n # organize mapping structure\r\n mapping = 
mapping_structure(keys)\r\n print('mapping is : {}'.format(str(mapping)))\r\n create_mapping(es, objectClass, mapping)\r\n # paged ldap data generator,2000 data by default\r\n datagen = page_data(ldap_conn, ldap_config['basedn'], objectClass, attributes=keys)\r\n # loop\r\n loop = asyncio.get_event_loop()\r\n loop.run_until_complete(run(loop, keys, datagen,objectClass,es))\r\n\r\nif __name__ == '__main__':\r\n print('start at : '+str(datetime.datetime.now()))\r\n for obj in sync_objectClass:\r\n p=multiprocessing.Process(target=main, args=(obj,))\r\n p.start()\r\n print('start process for '+str(obj))","sub_path":"sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"608549931","text":"\n# 练习:\n# 定义一个 '人' (Human) 类\n# class Human:\n# def set_info(self, n, a, addr='不详'):\n# '''此方法用来给对象添加'姓名','年龄',\n# '家庭住址'属性'''\n# ... 此处自己实现\n# def show_info(self):\n# '''此处显示此人的信息'''\n# ... 此处自己实现\n# s1 = Human()\n# s1.set_info(\"小张\", 20, '北京市东城区')\n# s2 = Human()\n# s2.set_info(\"小李\", 18)\n# s1.show_info() # 小张 今年 20 岁,家庭住址: 北京市东城区\n# s2.show_info() # 小李 今年 18 岁,家庭住址: 不详\n# class Human:\n# def set_info(self,name,age,address='不详'):\n# self.name=name\n# self.age=age\n# self.address=address\n# def show_info(self):\n# print(self.name,\"今年:\",self.age,\"家庭地址:\",self.address)\n# h1=Human()\n# h1.set_info(\"小张\",20,\"北京朝阳区\")\n# h2=Human()\n# h2.set_info(\"小李\",20)\n# h1.show_info()\n# h2.show_info()\n\n\n# 练习:\n# 1. 写一个学生类Student类.此类用于描述学生信息,学生信息有:\n# 姓名,年龄,成绩(默认为0)\n# 1) 为该类添加初始化方法,实现在创建对象时自动设置:\n# 姓名(name),年龄(age), 成绩(score)属性\n# 2) 添加set_score方法能力对象修改成绩信息\n# 3) 添加show_info方法打印学生对象的信息\n# 如:\n# class Student:\n# def __init__(self,name,age,score=0):\n# self.name=name\n# self.age=age\n# self.score=score\n# def set_score(self, score):\n# self.score=score\n# def show_info(self):\n# print(\"姓名:\",self.name,\"年龄:\",self.age,\"成绩:\",self.score)\n# L = []\n# L.append(Student(\"小张\", 20, 100))\n# L.append(Student(\"小李\", 18, 98))\n# L.append(Student(\"小菜\", 19))\n# print(L)\n# L[-1].set_score(70)\n# for s in L:\n# s.show_info() # 列出所有学生的信息\n# print(L)\n\n\n# 练习:\n# 有两个人:\n# 1. 姓名: 张三, 年龄: 35\n# 2. 姓名: 李四, 年龄: 10\n# 行为:\n# 1. 教别人学东西 teach\n# 2. 工作赚钱 work\n# 3. 借钱 borrow\n# 4. 显示自己的信息 show_info\n# 事情:\n# 张三 教 李四 学 python\n# 李四 教 张三 学 王者荣耀\n# 张三 上班赚钱 1000 元\n# 李四 向 张三 借钱 200元\n# 35 岁的 张三 有钱 800 元,它学会的技能是: 王者荣耀\n# 10 岁的 李四 有钱 200 元,它学会的技能是: python\n# 类的封装如下:\nclass Human:\n def __init__(self,name,age):\n self.name=name\n self.age=age\n self.money=0\n self.skill=[]\n def teach(self,other,subject):\n print(self.name,\"教\",other.name,\"学\",subject)\n other.skill.append(subject)\n def work(self,money):\n print(self.name,\"上班赚钱\",money,\"元\")\n self.money+=money\n def borrow(self,other,money):\n print(self.name,\"向\",self.name,\"借\",money,\"元\")\n self.money-=money\n def show_info(self):\n print(self.age,\"的\",self.name,\"有钱\",self.money,\"他学会的技能是:\",self.skill)\nzhang3 = Human(\"张三\", 35)\nli4 = Human(\"李四\", 10)\n\n# .... 
此处描述事情的过程\nzhang3.teach(li4,\"python\")\nli4.teach(zhang3,\"王者荣耀\")\nzhang3.work(1000)\nli4.borrow(zhang3,200)\nzhang3.show_info()\nli4.show_info()","sub_path":"python3/day17/lianxi.py","file_name":"lianxi.py","file_ext":"py","file_size_in_byte":3330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"101047720","text":"from pandas import DataFrame,Series\nimport numpy as np\nimport os\nfrom context.resource_manager import Properties\nfrom tools import logger\n\nlog=logger.getLogger()\n\ndef _conv(o):\n x = np.array(o, dtype='|S4')\n y = x.astype(np.float)\n return y\n\n\ndef get_data_from_xml(path=Properties.getImageXmlResource()):\n \"\"\"\n 解析xml文件,获得对应的id,data,作为运算的基础\n :param path:\n :return: list, NumpArray 用于显示控制与用于计算\n list is used to make the index to location the value\n \"\"\"\n log.info(\"starting running compute_distance_from_xml function.\")\n from context.resource_manager import Properties\n from pandas import DataFrame,Series\n path=os.path.join(Properties.getRootPath(),Properties.getImageXmlResource())\n from xml.dom.minidom import parse,parseString\n images=parse(path)\n id=[]\n data=[]\n for node in images.getElementsByTagName(\"Image\"):\n idNode=node.getElementsByTagName(\"id\")[0].childNodes[0].data\n id.append(idNode)\n dataNode=node.getElementsByTagName(\"data\")[0].childNodes[0].data\n dataNode=dataNode[1:-1].split(',')\n data.append(dataNode)\n id=np.asarray(id)\n id=id.tolist()\n data=np.asarray(data)\n data=np.asarray(list(map(_conv,data)),dtype=np.float)\n return id,data\n\n\n\nclass Image:\n def __init__(self):\n self.id=\"\"\n self.data=\"\"","sub_path":"pk/data/bean/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"181266890","text":"from datetime import timedelta\nimport logging\nimport time\n\nfrom humanize import precisedelta\nfrom scipy import fft\nfrom scipy import integrate\nimport numpy\n\n\ndef _report_progress(n, nt, timer_start):\n \"\"\"\n Report integration progress to the log.\n \"\"\"\n\n progress = (n / nt)\n timer_current = time.time()\n timer_elapsed = timer_current - timer_start\n timer_left = timer_elapsed * (1 - progress) / progress\n\n # Format timers to human-readable strings (hence 'hr_').\n hr_elapsed = precisedelta(\n timedelta(seconds=timer_elapsed),\n minimum_unit=\"seconds\",\n format=\"%02d\")\n hr_left = precisedelta(\n timedelta(seconds=timer_left),\n minimum_unit=\"seconds\",\n format=\"%02d\")\n\n logging.info(\n \" %5.2f%% %s elapsed %s left\",\n 100 * progress, hr_elapsed, hr_left)\n\n\nclass IntegrationResult:\n \"\"\"\n This is a container class for the integration results.\n\n Parameters\n ----------\n t : array_like\n an array of time points where the integrated function is\n evaluated\n x : array_like\n the coordinate grid\n k : array_like\n the frequency grid\n u : array_like of shape len(t)×len(x)\n integrated field in time domain evaluated on a grid\n v : array_like of shape len(t)×len(x)\n integrated field in frequency domain evaluated on a grid\n successful : bool\n a flag indicating that the integration was successful\n error_code : int\n return code of the solver if the integration failed\n\n Note\n ----\n `u` and `v` are output matrices of the integrated field in\n coordinate-domain and spectral-domain representations. 
Row number\n is position in the time array, column number is the position in\n coordinate/frequency array.\n \"\"\"\n\n def __init__(self, t, x, k, u, v, error_code=None):\n self.t = t\n self.x = x\n self.k = fft.fftshift(k)\n self.u = u\n self.v = v\n self.error_code = error_code\n\n @property\n def successful(self):\n return self.error_code is None\n\n\ndef gnlse(t, x, u0, beta, gamma, nonlin, lin=None, dt=None, gpc=None):\n \"\"\"\n Integrate a GNLSE using the integrating factor method.\n\n This function integrates a generalized version of nonlinear Schrödinger\n equation\n\n ∂ₜ ũ = i β(k) ũ(t, k)\n + i γ(k) F{ N(t, x, u(t, x)) }\n + i F{ L(t, x, u(t, x)) },\n\n where ũ(t, k) is the spectrum of the unknown field, β(k) is a\n dispersive operator, and γ(k) is a gain coefficient. u(t, x) is the\n coordinate-domain representation of the same unknown field, and\n nonolinear operator N(t, x, u(t, x)) is defined in terms of that\n coordinate representation. L(t, x, u(t, x)) is an auxiliary linear\n operator that can be used to introduce an absorbing boundary layer.\n It does not have to have a physical meaning.\n\n The integration is performed using the integrating factor method as\n described by J.M. Dudley & J.R. Taylor in Chapter 3 of Supercontinuum\n Generation in Optical Fibers, CUP 2010. Instead of integrating the\n original equation we resort to integrating a modified version\n\n ∂ₜ v = i γ(k) F{ N(t, x, u(t, x)) }\n + i F{ L(t, x, u(t, x)) },\n\n where v = v(t, k) is the modified spectrum that is defined as\n\n ũ(t, k) = exp(i β(k) t) v(t, k).\n\n The modifed equation is supposed to be non-stiff, which allows us to\n apply almost any third-party solver. We chose a scipy-provided wrapper\n of ZVODE solver from ODEPACK, which offers error control and an\n adaptive scheme for step-size selection.\n\n Parameters\n ----------\n t : array_like\n an array of time points where the integrated function should\n be evaluated\n x : array_like\n the coordinate grid\n u0 : array_like\n the initial condition at t[0]\n beta : callable with the signature of beta(f)\n the dispersive profile as a function of frequency\n gamma : callable with the signature of gamma(f)\n frequency-dependent gain part of the nonlinear operator\n nonlin : callable with the signature of nonlin(t, x, u)\n time-domain part of the nonlinear operator\n lin : callable with the signature of lin(t, x, u)\n time-domain linear operator\n gpc : callable with the signature of gcp(t, x, f, u, v)\n an optional callback executed after computing up to the next\n grid point\n\n Returns\n -------\n result : an instance of IntegrationResult\n \"\"\"\n\n # Pre-allocate the output matrices.\n nt = len(t)\n nx = len(x)\n\n u = numpy.zeros((nt, nx), dtype=complex)\n v = numpy.zeros((nt, nx), dtype=complex)\n\n # Put the initial conditions in time and frequency domains into\n # the output matrices\n v0 = fft.ifft(u0)\n u[0, :] = u0\n v[0, :] = fft.fftshift(v0)\n\n # Prepare the frequency scale and evaluate beta on the scale\n k = 2 * numpy.pi * fft.fftfreq(nx, x[1] - x[0])\n D = beta(k)\n G = gamma(k)\n\n # Prepare the RHS we feed to the solver\n def rhs(t_, v_):\n # Scale the spectrum by the accumulated phase shift due to\n # the dispersion and transform to coordinate space\n exp = numpy.exp(1j * D * (t_ - t[0]))\n u_ = fft.fft(exp * v_)\n\n # Apply nonlinear operator N() and, maybe, linear operator L()\n # as well, transform back to the modified spectrum and return.\n ret = G * fft.ifft(nonlin(t, x, u_))\n if lin:\n ret += fft.ifft(lin(t, x, 
","sub_path":"nlse/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":7995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"626293316","text":"from collections import Counter\n\ndef solution(cards, words):\n answer = []\n \n for word in words:\n count_word = Counter(word)\n flag = True\n for card in cards:\n count_card = Counter(card)\n \n if not count_word & count_card:\n flag = False\n break\n count_word -= count_card\n \n if flag and not count_word:\n answer.append(word)\n \n if answer:\n return answer\n else:\n return [\"-1\"]","sub_path":"job_coding_test/j_codingtest/q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"375977457","text":"# Copyright (c) 2014, Vienna University of Technology (TU Wien), Department\n# of Geodesy and Geoinformation (GEO).\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the 
following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the Vienna University of Technology - Department of\n# Geodesy and Geoinformation nor the names of its contributors may be used to\n# endorse or promote products derived from this software without specific\n# prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL VIENNA UNIVERSITY OF TECHNOLOGY,\n# DEPARTMENT OF GEODESY AND GEOINFORMATION BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,\n# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n# Author: Thomas Mistelbauer Thomas.Mistelbauer@geo.tuwien.ac.at\n# Creation date: 2014-05-26\n\nfrom cStringIO import StringIO\nimport os\nimport json\nfrom flask import Flask, request, render_template, jsonify, make_response\nfrom flask.ext.cors import CORS\nimport urllib2\nimport numpy as np\nimport pandas as pd\nfrom poets.timedate.dateindex import get_dtindex\nfrom poets.web.overlays import image_bounds\nfrom poets.shape.shapes import Shape\nfrom pytesmo.time_series.anomaly import calc_anomaly, calc_climatology\nimport urlparse\nimport matplotlib as mpl\nmpl.use('Agg')\nfrom shapely.geometry.geo import mapping\nimport matplotlib.pyplot as plt\n\n\ndef curpath():\n \"\"\"\n Gets the current path of the module.\n\n Returns\n -------\n pth : str\n Path of the module.\n \"\"\"\n pth, _ = os.path.split(os.path.abspath(__file__))\n return pth\n\n\ndef to_dygraph_format(self):\n \"\"\"\n Transforms pandas DataFrame to Dygraphs compatible format.\n\n Returns\n -------\n labels : list of str\n Labels of the Dygraphs array.\n values : list\n Values of the Dygraphs array.\n \"\"\"\n\n labels = ['date']\n labels.extend(self.columns.values.tolist())\n data_values = np.hsplit(self.values, self.columns.values.size)\n data_index = self.index.values.astype('M8[s]').tolist()\n data_index = [x.strftime(\"%Y/%m/%d %H:%M:%S\") for x in data_index]\n data_index = np.reshape(data_index, (len(data_index), 1))\n data_values.insert(0, data_index)\n data_values = np.column_stack(data_values)\n values = data_values.tolist()\n\n return labels, values\n\npd.DataFrame.to_dygraph_format = to_dygraph_format\n\n# dest = os.path.join(curpath(), 'static', 'temp')\n\n\nclass ReverseProxied(object):\n '''Wrap the application in this middleware and configure the\n front-end server to add these headers, to let you quietly bind\n this to a URL other than / and to an HTTP scheme that is\n different than what is used locally.\n\n In nginx:\n location /myprefix {\n proxy_pass http://192.168.0.1:5001;\n proxy_set_header Host $host;\n proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n proxy_set_header X-Scheme $scheme;\n proxy_set_header X-Script-Name /myprefix;\n }\n\n :param app: the WSGI application\n '''\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n script_name = 
environ.get('HTTP_X_SCRIPT_NAME', '')\n if script_name:\n environ['SCRIPT_NAME'] = script_name\n path_info = environ['PATH_INFO']\n if path_info.startswith(script_name):\n environ['PATH_INFO'] = path_info[len(script_name):]\n\n scheme = environ.get('HTTP_X_SCHEME', '')\n if scheme:\n environ['wsgi.url_scheme'] = scheme\n return self.app(environ, start_response)\n\napp = Flask(__name__, static_folder='static', static_url_path='/static',\n template_folder=\"templates\")\napp.config['CORS_HEADERS'] = 'Content-Type'\n\n# app.wsgi_app = ReverseProxied(app.wsgi_app)\n\ncors = CORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n\n\ndef start(poet, host='127.0.0.1', port=None, r_host=None, r_port=None,\n url=None, debug=False):\n \"\"\"\n Starts application and sets global variables.\n\n Parameters\n ----------\n poet : Poet()\n Instance of Poet class.\n host : str, optional\n Host that is used by the app, defaults to 127.0.0.1.\n port : int, optional\n Port where app runs on, defaults to 50000.\n r_host : str, optional\n IP of router that is between host and internet.\n r_port : int, optional\n Port of router that is between host and internet.\n debug : bool, optional\n Starts app in debug mode if set True, defaults to False.\n \"\"\"\n\n global p\n global variables\n global dates\n global vmin, vmax, cmap\n global host_gl\n global port_gl\n global url_gl\n\n p = poet\n variables = poet.get_variables()\n\n if port is None:\n port = 5000\n\n if r_host is None:\n host_gl = host\n else:\n host_gl = r_host\n\n if r_port is None:\n port_gl = port\n else:\n port_gl = r_port\n\n if url is not None:\n pa = urlparse.urlparse(url, 'http')\n url_gl = os.path.join(pa.geturl(), '')\n else:\n url_gl = url\n\n if debug:\n app.run(debug=True, use_debugger=True, use_reloader=True, host=host,\n port=port)\n else:\n app.run(host=host, port=port)\n\n\n@app.route('/', methods=['GET', 'POST'])\n@app.route('/&', methods=['GET', 'POST'])\ndef index(**kwargs):\n \"\"\"\n Renders main page of the web application. 
Generates image arguments needed\n for OpenLayers overlay if parameters `reg` and `var` are set, renders\n entry page if not set.\n \"\"\"\n\n global enddate\n global dates\n global ndate\n\n regions = []\n for i, reg in enumerate(p.regions):\n regions.append({'code': reg, 'name': p.region_names[i]})\n\n if len(kwargs) > 0:\n\n if 'reg' in kwargs:\n region = kwargs['reg']\n if 'var' in kwargs:\n variable = kwargs['var']\n\n for src in p.sources.keys():\n if variable in p.sources[src].get_variables():\n source = p.sources[src]\n\n ndate = source._check_current_date()\n begindate = ndate[region][variable][0]\n enddate = ndate[region][variable][1]\n\n if begindate is None and enddate is None:\n\n error = 'No data available for this dataset.'\n\n return render_template('index.html',\n regions=p.regions,\n sources=p.sources.keys(),\n variables=variables,\n error=error)\n\n d = get_dtindex(p.temporal_resolution, begindate, enddate)\n dates = d.to_pydatetime()\n\n fdates = []\n\n for i, d in enumerate(dates.tolist()):\n dat = {'id': i, 'date': d.strftime('%Y-%m-%d')}\n fdates.append(dat)\n\n lon_min, lon_max, lat_min, lat_max, c_lat, c_lon, _ = \\\n image_bounds(region, p.spatial_resolution, p.shapefile)\n\n if source.valid_range is None:\n vrange = [-999, -999]\n else:\n vrange = source.valid_range\n\n ex1 = (lon_max, lat_min)\n ex2 = (lon_min, lat_max)\n\n return render_template('app.html',\n max=len(dates) - 1,\n coord=[c_lon, c_lat],\n ex1=ex1,\n ex2=ex2,\n region=region,\n source=source.name,\n variable=variable,\n regions=regions,\n variables=variables,\n dates=fdates,\n host=host_gl,\n port=port_gl,\n sp_res=p.spatial_resolution,\n range=vrange,\n url=url_gl,\n subregions=get_subregions(region)\n )\n else:\n return render_template('index.html',\n regions=regions,\n sources=p.sources.keys(),\n variables=variables,\n host=host_gl,\n port=port_gl,\n url=url_gl)\n\n\n@app.route('/_ts/&&&', methods=['GET', 'OPTIONS'])\n@app.route('/_ts/&&&&', methods=['GET', 'OPTIONS'])\ndef get_ts(**kwargs):\n \"\"\"\n Gets time series for selected location, gets anomaly of time series if\n `anom` parameter is passed.\n\n Returns\n -------\n jsonified str\n Time series (anomaly) in Dygraphs compatible json format.\n \"\"\"\n\n anomaly = False\n\n if 'reg' in kwargs:\n region = kwargs['reg']\n if 'src' in kwargs:\n source = p.sources[kwargs['src']].name\n if 'var' in kwargs:\n variable = kwargs['var']\n if 'loc' in kwargs:\n loc = kwargs['loc']\n if 'anom' in kwargs:\n anomaly = True\n\n loc = loc.split(',')\n lonlat = (float(loc[0]), float(loc[1]))\n\n df = p.read_timeseries(source, lonlat, region, variable)\n\n if anomaly:\n df = calc_anom(df, variable)\n\n labels, values = df.to_dygraph_format()\n data = {'labels': labels, 'data': values}\n\n return jsonify(data)\n\n\n@app.route('/_ts_avg/&&', methods=['GET', 'OPTIONS'])\n@app.route('/_ts_avg/&&&', methods=['GET', 'OPTIONS'])\ndef get_ts_average(**kwargs):\n\n anomaly = False\n\n if 'reg' in kwargs:\n region = kwargs['reg']\n if 'src' in kwargs:\n source = p.sources[kwargs['src']].name\n if 'var' in kwargs:\n variable = kwargs['var']\n if 'anom' in kwargs:\n anomaly = True\n\n df = p.average_timeseries(source, region, variable)\n df = df[0]\n\n if anomaly:\n df = calc_anom(df)\n\n labels, values = df.to_dygraph_format()\n data = {'labels': labels, 'data': values}\n\n return jsonify(data)\n\n\ndef calc_anom(df, variable=None):\n \"\"\"\n Calculates anomaly based on climatology for time series.\n\n Parameters\n ----------\n df : pandas DataFrame\n Dataframe 
containing time series.\n variable : str\n Variable to select from DataFrame\n\n Returns\n -------\n df : pandas DataFrame\n Anomaly of time series.\n \"\"\"\n\n climatology = calc_climatology(df)\n if variable is None:\n variable = df.keys()[0]\n anom = calc_anomaly(df[variable], climatology=climatology)\n\n df[variable] = anom\n columns = []\n for cols in df.columns:\n columns.append(cols + '_anomaly')\n df.columns = columns\n\n return df\n\n\n@app.route('/_tsdown/&&&')\n@app.route('/_tsdown/&&&&')\ndef download_ts(**kwargs):\n \"\"\"\n Initiates download time series (anomaly) in comma separated values format.\n\n Returns\n -------\n jsonified str\n Time series (anomaly) in Dygraphs compatible json format.\n \"\"\"\n\n anomaly = False\n\n if 'reg' in kwargs:\n region = kwargs['reg']\n if 'src' in kwargs:\n source = p.sources[kwargs['src']]\n if 'var' in kwargs:\n variable = kwargs['var']\n if 'loc' in kwargs:\n loc = kwargs['loc']\n if 'anom' in kwargs:\n anomaly = True\n\n loc = loc.split(',')\n lonlat = (float(loc[0]), float(loc[1]))\n\n filename = region + '_' + variable + '_' + loc[0][:6] + '_' + loc[1][:6]\n\n df = p.read_timeseries(source.name, lonlat, region, variable)\n\n if anomaly:\n df = calc_anom(df, variable)\n\n output = StringIO()\n\n df.to_csv(output)\n csv = output.getvalue()\n\n response = make_response(csv)\n response.headers[\"Content-Disposition\"] = (\"attachment; filename=\" +\n filename + \".csv\")\n\n return response\n\n\n@app.route('/_tsdown_avg/&&', methods=['GET', 'OPTIONS'])\n@app.route('/_tsdown_avg/&&&', methods=['GET', 'OPTIONS'])\ndef download_ts_avg(**kwargs):\n\n anomaly = False\n\n if 'reg' in kwargs:\n region = kwargs['reg']\n if 'src' in kwargs:\n source = p.sources[kwargs['src']].name\n if 'var' in kwargs:\n variable = kwargs['var']\n if 'anom' in kwargs:\n anomaly = True\n\n df = p.average_timeseries(source, region, variable)\n df = df[0]\n\n if anomaly:\n df = calc_anom(df)\n\n output = StringIO()\n\n df.to_csv(output)\n csv = output.getvalue()\n\n filename = region + '_' + variable\n\n response = make_response(csv)\n response.headers[\"Content-Disposition\"] = (\"attachment; filename=\" +\n filename + \".csv\")\n\n return response\n\n\n@app.route('/_rimg/&&&', methods=['GET', 'POST'])\ndef request_image(**kwargs):\n \"\"\"\n Creates image for OpenLayers overlay.\n\n Returns\n -------\n StringIO\n Image in StringIO.\n \"\"\"\n\n global vmin\n global vmax\n global metadata\n global cmap\n\n if 'reg' in kwargs:\n region = kwargs['reg']\n if 'src' in kwargs:\n source = p.sources[kwargs['src']]\n if 'var' in kwargs:\n variable = kwargs['var']\n if 'idx' in kwargs:\n idx = kwargs['idx']\n\n pidx = (dates[int(idx)])\n\n img, _, _, metadata = p.read_image(source.name, pidx, region, variable)\n\n if source.unit is not None:\n if metadata is not None and 'unit' not in metadata:\n metadata['unit'] = source.unit\n elif metadata is None:\n metadata = {}\n metadata['unit'] = source.unit\n\n if source.labels is not None and source.xticks is not None:\n if metadata is None:\n metadata = {}\n metadata['labels'] = source.labels\n metadata['xticks'] = source.xticks\n\n if source.valid_range is not None:\n vmin = source.valid_range[0]\n vmax = source.valid_range[1]\n else:\n vmin = np.nanmin(img)\n vmax = np.nanmax(img)\n\n cmap = source.colorbar\n\n # Rescale the image\n n = 10\n img = np.kron(img, np.ones((n, n)))\n img[img == p.nan_value] = np.NAN\n\n buf = StringIO()\n plt.imsave(buf, img, vmin=vmin, vmax=vmax, cmap=cmap)\n\n image = buf.getvalue()\n\n response 
= make_response(image)\n response.headers[\"Content-Type\"] = (\"image/png; filename=data.png\")\n\n return response\n\n\n@app.route('/_rlegend/&', methods=['GET', 'POST'])\n@app.route('/_rlegend/&&', methods=['GET', 'POST'])\ndef request_legend(**kwargs):\n \"\"\"\n Creates Legend for OpenLayers overlay.\n\n Returns\n -------\n StringIO\n Legend in StringIO.\n \"\"\"\n\n global vmin\n global vmax\n global metadata\n global cmap\n\n fig = plt.figure(figsize=(4, 0.7))\n ax1 = fig.add_axes([0.05, 0.7, 0.9, 0.10])\n norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n\n cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=cmap, norm=norm,\n orientation='horizontal')\n plt.xticks(fontsize=9)\n\n if metadata:\n units = ['units', 'unit', 'UNITS', 'UNIT']\n for unit in units:\n if unit in metadata:\n cb1.set_label(metadata[unit], fontsize=10)\n if 'labels' in metadata.keys() and 'xticks' in metadata.keys():\n cb1.set_ticks(metadata['xticks'])\n cb1.set_ticklabels(metadata['labels'])\n\n fig.patch.set_alpha(0.6)\n\n buf = StringIO()\n plt.savefig(buf)\n plt.close()\n\n image = buf.getvalue()\n\n response = make_response(image)\n response.headers[\"Content-Type\"] = (\"image/png; filename=legend.png\")\n\n return response\n\n\n@app.route('/_variables', methods=['GET', 'POST'])\n@app.route('/_variables/', methods=['GET', 'POST'])\n@app.route('/_variables/', methods=['GET', 'POST'])\ndef request_variables(**kwargs):\n\n if 'reg' in kwargs:\n region = kwargs['reg']\n else:\n region = None\n\n variables = {}\n variables['variables'] = p.get_variables(region)\n\n return jsonify(variables)\n\n\ndef get_subregions(region):\n \"\"\"\n Gets all sub-regions of a region.\n\n Parameters\n ----------\n region : str\n Region to select subregions from.\n\n Returns\n -------\n subregions : list of str\n List of all subregions\n \"\"\"\n idx = p.regions.index(region)\n if p.sub_regions is not None:\n return p.sub_regions[idx]\n else:\n return None\n\n\n@app.route('/_get_geojson/', methods=['GET', 'POST'])\ndef get_geojson(region):\n \"\"\"\n Gets list of coordinates from polygon of region.\n\n Parameters\n ----------\n region : str\n Region to get coordinates from.\n\n Returns\n -------\n coordinates : list\n \"\"\"\n\n shape = Shape(region, p.shapefile).polygon\n\n return jsonify(mapping(shape))\n\n\n@app.route('/about')\ndef about():\n \"\"\"\n Creates the `about` page.\n \"\"\"\n return render_template('about.html', url=url_gl)\n\n\n@app.route('/odk_aggregate/formList')\ndef formList():\n request = urllib2.Request('http://127.0.0.1:8080/ODKAggregate/formList')\n request.add_header('User-agent', 'Mozilla/5.0 (Linux i686)')\n return urllib2.urlopen(request).read()\n\n\n@app.route('/odk_aggregate/submissionList/')\ndef submissionList(formid):\n request = urllib2.Request('http://127.0.0.1:8080/ODKAggregate/view/submissionList?formId=' + formid)\n request.add_header('User-agent', 'Mozilla/5.0 (Linux i686)')\n return urllib2.urlopen(request).read()\n\n\n@app.route('/odk_aggregate/downloadSubmission//')\ndef downloadSubmission(formid, submissionid):\n requestURL = 'http://127.0.0.1:8080/ODKAggregate/view/downloadSubmission?'\n requestURL += 'formId=' + formid\n requestURL += '[@version=null%20and%20@uiVersion=null]/'\n requestURL += formid.upper()\n requestURL += '[@key=' + submissionid + ']'\n request = urllib2.Request(requestURL)\n request.add_header('User-agent', 'Mozilla/5.0 (Linux i686)')\n return 
urllib2.urlopen(request).read()\n","sub_path":"poets/web/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":18949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"375815579","text":"#coding:utf-8\n\"\"\"\nauthor:hxtkyne\nsource:https://github.com/hxtkyne\nreference:keras document\ndate:2018-1-27\ndescription: keras cnn models on cifar10 dataset\n\"\"\"\n\n# Convolution1D:Conv1D\n# filters: number of output channels of the convolution layer\n# 'valid': no zero padding, 'same': zero padding\n# from keras.layers.convolutional import Conv1D\n# Conv1D(filters, kernel_size, strides=1, padding='valid',...)\n\n# Convolution2D:Conv2D: note whether data_format is channels_last or channels_first\n# input shape: (batch_size, rows, cols, channels)\n# output shape: (batch_size, new_rows, new_cols, filters)\n# from keras.layers.convolutional import Conv2D\n# Conv2D(filters, kernel_size, strides=(1,1), padding='valid',...)\nfrom keras.datasets import cifar10\nfrom keras.utils import np_utils\n\nnb_classes = 10\n(x_train, y_train),(x_test,y_test) = cifar10.load_data()\ny_train = np_utils.to_categorical(y_train, nb_classes)\ny_test = np_utils.to_categorical(y_test, nb_classes)\nx_train = x_train.astype(\"float32\")\nx_test = x_test.astype(\"float32\")\nx_train /= 255\nx_test /= 255\nprint(x_train.shape, y_train.shape)\n","sub_path":"keras_doc3.py","file_name":"keras_doc3.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"235813558","text":"import smtplib\nfrom email.message import EmailMessage\nimport config\n\ncontacts = []\nwith open('contacts.txt', 'r') as c:\n next(c)\n for line in c:\n x = line.rstrip().split('\\t')\n contacts.append(x[2])\n c.close()\n\nmsg = EmailMessage()\nmsg['Subject'] = 'Test message'\nmsg['From'] = config.EMAIL_ADDRESS\nmsg['To'] = ', '.join(contacts)\nmsg.set_content('Just testing the python code')\n\nwith open('D:/Documents/UCR/Fall2021/NorthDistrictCheckIn.pdf', 'rb') as f:\n file_data = f.read()\n file_name = f.name\n\nmsg.add_attachment(file_data, maintype='application', subtype='octet-stream', filename='NorthDistrictCheckIn.pdf')\n\nwith open('D:/Pictures/Saved Pictures/zcyhewq.jpg', 'rb') as j:\n jpg_data = j.read()\n jpg_name = j.name\n\nmsg.add_attachment(jpg_data, maintype='image', subtype='jpeg', filename=jpg_name)\n\nwith smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:\n smtp.login(config.EMAIL_ADDRESS, config.EMAIL_PASSWORD)\n smtp.send_message(msg) \n\n","sub_path":"sendMail.py","file_name":"sendMail.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"457957496","text":"import paho.mqtt.client as mqtt\nimport simplejson as json\nimport time\nimport random\nimport os\n\nTHINGSBOARD_HOST = os.getenv('THINGSBOARD_HOST')\n#ACCESS_TOKEN=\"tTkrnM9PZRlNYM8nPaQW\"\nACCESS_TOKEN = os.getenv('ACCESS_TOKEN')\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected With Result Code :\" + str(rc))\n\ndef on_message(client, userdata, msg):\n print(\"sent\")\n'''\ndef on_publish(client, userdata, result):\n print(\"publish: \", result)\n global loop_flag\n loop_flag=0\n client.loop_stop()\n client.disconnect()\n'''\n\n\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\n#client.on_publish = on_publish\nclient.username_pw_set(ACCESS_TOKEN)\nclient.connect(THINGSBOARD_HOST, 8080, 60)\nprint(\"after connect\")\n\nclient.loop_start()\n\n\nloop_flag=1\ncounter = 
0\nTEMPERATURE = 20\n#msg={\"name\": 1234.0}\n\nwhile loop_flag:\n temperature = TEMPERATURE + (random.random() *15)\n msg = {\"temperature\": temperature}\n client.publish('v1/devices/me/telemetry', json.dumps(msg, use_decimal=True), 1, retain=True)\n time.sleep(10)\n\n'''\nwhile loop_flag == 1:\n print(\"waiting for publish callback to occur : \", counter)\n time.sleep(.001)\n counter+=1\n'''\n","sub_path":"simple-mqtt-nano/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"638381846","text":"BOT_TOKEN = 'YOUR_BOT_TOKEN'\nWEBHOOK_URI = 'https://your.webhook.uri/webhook'\n\nMONGODB_DB = 'camp2017'\nMONGODB_HOST = '127.0.0.1'\nMONGODB_PORT = 27017\n\nCURRENCY_NAME = '小石幣'\nSCANNER_BUTTON_TEXT = '掃下去'\nKEYWORD_MATCH_REWARD = 10\n","sub_path":"app/config-sample.py","file_name":"config-sample.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"611858746","text":"from flask import Flask\nfrom flask import make_response\nfrom flask import request\nfrom flask import render_template\nimport handle2\nimport parsehtml2\n\napp = Flask(__name__)\napp.debug = True\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return render_template('errorpage.html'), 404\n\n@app.errorhandler(500)\ndef page_not_found(error):\n return render_template('errorpage.html'), 500\n\n@app.route('/')\ndef index():\n return \"Hello boys and girls, welcome to IDATA!\" \n\n@app.route('/test/')\ndef test(para):\n# resp = make_response(\"\",302,{'Location':'http://'+para})\n# resp.set_cookie(\"test\",\"awen\")\n# resp.set_cookie('cookie2','testcookie2')\n# resp.headers.extend({'Set-Cookie':'add one cookie'})\n# resp.headers.extend({'Location':'http://www.baidu.com'})\n# return str(resp.headers)\n contentlist = [{'title':'awen','test':'test1'}]\n resp = make_response(render_template('search.html',keyword=para,contentlist=contentlist))\n return resp\n\n@app.route('/handle/')\ndef handle(keyword):\n header = handle2.gethandleheader(keyword,app.debug)\n resp = make_response('',302,header)\n return resp\n\n@app.route('/search//')\ndef search(pagenum):\n cookies = request.cookies\n cookiestr = ''\n for i in cookies:\n cookiestr = cookiestr + i + '=' + cookies[i] + ';' + ' '\n headerlist,htmlcontent = handle2.getsearchcontent('',handle2.getdecodecookiestr(cookiestr))\n contentlist = parsehtml2.makecontentlist(htmlcontent)\n contentlistex = []\n for l in contentlist:\n l['link'] = parsehtml2.getlinkstr(l)\n contentlistex.append(l)\n pagemap = parsehtml2.makelinkmap(pagenum,1)\n resp = make_response(render_template('search.html',keyword=pagenum,contentlist=contentlistex,linkmap=pagemap),200,headerlist)\n return resp\n\n@app.route('/search//')\ndef searchpage(keyword,pagenum):\n cookies = request.cookies\n cookiestr = ''\n for i in cookies:\n cookiestr = cookiestr + i + '=' + cookies[i] + ';' + ' ' \n headerlist,htmlcontent = handle2.dopage(pagenum,handle2.getdecodecookiestr(cookiestr))\n contentlist = parsehtml2.makecontentlist(htmlcontent)\n contentlistex = []\n for l in contentlist:\n l['link'] = parsehtml2.getlinkstr(l)\n contentlistex.append(l)\n pagemap = parsehtml2.makelinkmap(keyword,int(pagenum))\n resp = make_response(render_template('search.html',keyword=keyword,contentlist=contentlistex,linkmap=pagemap),200,headerlist)\n return resp\n\n\n@app.route('/summary/')\ndef summary(info):\n content = 
handle2.getsumary(info)\n resulthtml = parsehtml2.getsumary(content)\n resp = make_response(resulthtml)\n return resp\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')","sub_path":"idatasearch.py","file_name":"idatasearch.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"33235441","text":"import datetime\nimport json\nimport time\n\ntry:\n from urllib.parse import urlparse\nexcept:\n from urlparse import urlparse\n\nimport boto3\nfrom botocore.exceptions import ClientError\n\ntry:\n from finitestate.common.schema import assert_data_matches_schema\nexcept Exception as e:\n pass\n\n\ntry:\n import finitestate.common.dateutil as fsdt\nexcept Exception as e:\n print(\"Unable to load module finitestate.common.dateutil: {}\".format(e))\n\ns3_client = boto3.client('s3')\ns3_resource = boto3.resource('s3')\n\n\ndef put_to_s3_with_retry(bucket, key, body, max_retries=3, content_md5=None, schema=None):\n if schema:\n try:\n assert_data_matches_schema(body, schema)\n except NameError as e:\n print(\"Unable to load module finitestate.common.schema: {}\".format(e))\n raise e\n\n other_kwargs = {}\n\n if content_md5:\n other_kwargs['ContentMD5'] = content_md5\n\n retry = 0\n while retry < max_retries:\n try:\n return s3_resource.Object(bucket, key).put(Body=body, **other_kwargs)\n except ClientError as err:\n if \"Rate Exceeded\" in err.args[0]:\n retry += 1\n sleep_time = 2**retry\n time.sleep(sleep_time)\n else:\n raise\n\n\ndef put_compliant_object(bucket, key, obj, schema):\n try:\n assert_data_matches_schema(obj, schema)\n except NameError as e:\n print(\"Unable to load module finitestate.common.schema: {}\".format(e))\n raise e\n\n s3_client.put_object(Bucket=bucket, Key=key, Body=json.dumps(obj))\n\n\ndef get_bucket_and_key_from_uri(uri):\n parsed_uri = urlparse(uri)\n return parsed_uri.netloc, parsed_uri.path.lstrip('/')\n\n\ndef slurp_object(bucket, key):\n return s3_client.get_object(Bucket=bucket, Key=key)['Body'].read()\n\n\ndef slurp_object_from_uri(uri):\n bucket, key = get_bucket_and_key_from_uri(uri)\n return slurp_object(bucket, key)\n\n\ndef find_aged_objects(base_uri, older_than, excluded_key_prefixes=None):\n \"\"\"\n A generator function that returns S3 objects under a base location that are older than a specified time.\n :param base_uri: A fully-qualified S3 uri\n :param excluded_key_prefixes: (Optional) A list of excluded S3 object key prefixes\n :param older_than: A datetime.datetime in UTC for absolute comparison or datetime.timedelta for comparison relative to UTC now.\n :return: A generator of boto3 ObjectSummary values.\n\n To correctly supply an absolute datetime.datetime value in UTC for the older_than argument, make sure that you create\n the datetime in such a manner that it has a defined, unambiguous timezone, e.g. 
datetime.datetime.now(datetime.timezone.utc)\n instead of datetime.datetime.utcnow()\n\n See https://docs.python.org/3/library/datetime.html#datetime.datetime.utcnow\n \"\"\"\n\n bucket, key = get_bucket_and_key_from_uri(base_uri)\n\n if not key:\n raise ValueError('Cannot determine key prefix to delete from {}'.format(bucket))\n elif not key.endswith('/'):\n key = key + '/'\n\n if isinstance(older_than, datetime.datetime):\n def skip(last_modified_utc):\n return last_modified_utc >= older_than\n elif isinstance(older_than, datetime.timedelta):\n now = fsdt.utcnow()\n\n def skip(last_modified_utc):\n # Skip objects whose age is still below the threshold.\n return (now - last_modified_utc) < older_than\n else:\n raise ValueError('older_than must be a datetime.datetime or datetime.timedelta')\n\n if isinstance(excluded_key_prefixes, str):\n excluded_key_prefixes = [excluded_key_prefixes]\n\n for o in s3_resource.Bucket(bucket).objects.filter(Prefix=key):\n if excluded_key_prefixes and any(o.key.startswith(key) for key in excluded_key_prefixes):\n continue\n\n if skip(fsdt.as_utc(o.last_modified)):\n continue\n\n yield o\n","sub_path":"src/finitestate/common/aws/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"558459574","text":"#!/usr/bin/env python\n\nfrom datetime import datetime, date\nfrom decimal import Decimal\nimport math\n\nfrom leather.scales.temporal import Temporal\n\n\nclass Years(Temporal):\n \"\"\"\n A scale that maps years to a coordinate range.\n\n This scale takes linear values (dates, datetimes, or numbers), but treats\n them as ordinal values for purposes of projection. Thus you can use this\n scale to render :class:`.Bars` or :class:`.Columns` for yearly data.\n\n :param domain_min:\n The minimum value of the domain.\n :param domain_max:\n The maximum value of the domain.\n \"\"\"\n def __init__(self, domain_min, domain_max):\n self._min = self._value_as_date(domain_min)\n self._max = self._value_as_date(domain_max)\n\n def _value_as_date(self, value):\n \"\"\"\n Convert a date or number to a date for consistent logic.\n \"\"\"\n if isinstance(value, (datetime, date)):\n return value\n elif isinstance(value, (int, float, Decimal)):\n return date(value, 1, 1)\n\n raise ValueError('Unsupported domain value for Annual scale.')\n\n def project(self, value, range_min, range_max):\n \"\"\"\n Project a value in this scale's domain to a target range.\n \"\"\"\n d = self._value_as_date(value)\n\n segments = self._max.year - self._min.year + 1\n segment_size = (range_max - range_min) / segments\n\n pos = d.year - self._min.year\n\n if pos >= 0:\n pos += 0.5\n else:\n pos -= 0.5\n\n return range_min + (pos * segment_size)\n\n def project_interval(self, value, range_min, range_max):\n \"\"\"\n Project a value in this scale's domain to an interval in the target\n range. This is used for placing :class:`.Bars` and :class:`.Columns`.
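\n\n For example (a worked sketch): a scale spanning the years 2000-2003\n projected onto the range (0, 400) has four 100-unit segments with a\n 5 percent (5-unit) gap on each side, so date(2001, 1, 1) maps to the\n interval (105.0, 195.0).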
\n \"\"\"\n d = self._value_as_date(value)\n\n segments = self._max.year - self._min.year + 1\n segment_size = (range_max - range_min) / segments\n gap = segment_size * 0.05\n\n pos = d.year - self._min.year\n\n a = range_min + ((pos) * segment_size) + gap\n b = range_min + ((pos + 1) * segment_size) - gap\n\n return (a, b)\n\n def ticks(self, count):\n \"\"\"\n Generate a series of ticks for this scale.\n \"\"\"\n size = int(math.ceil(float(self._max.year - self._min.year) / count))\n values = []\n\n for i in range(count):\n years = self._min.year + (i * size)\n\n values.append(date(years, 1, 1))\n\n return values\n\n def format_tick(self, value, i, count):\n \"\"\"\n Display only year component.\n \"\"\"\n return value.year\n","sub_path":"leather/scales/years.py","file_name":"years.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"338567081","text":"#!/usr/bin/env python\n# coding=utf-8\nimport os\nimport shutil\nfrom collections.abc import Iterable\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\n\n# NOTE: the header of this file arrived garbled; the imports above and the two\n# functions below are a reconstruction inferred from the surviving fragment and\n# from the call sites further down (extract_multiwindow_image, save_img, clean_dir).\ndef get_instance_number(data):\n # Instance Number is DICOM tag (0020, 0013)\n return data[('0020','0013')].value\n\n\ndef window_image(img, window_center, window_width, intercept, slope):\n img = (img * slope + intercept)\n img_min = window_center - window_width // 2\n img_max = window_center + window_width // 2\n img[img < img_min] = img_min\n img[img > img_max] = img_max\n img = ((img - img_min) * (255/(img_max - img_min))).astype('uint8')\n return img \n\n\ndef get_first_of_dicom_field_as_int(x):\n # get x[0] as an int if x is a 'pydicom.multival.MultiValue', otherwise get int(x)\n if isinstance(x, Iterable):\n return int(x[0])\n else:\n return int(x)\n\n\ndef get_windowing(data):\n dicom_fields = [data[('0028','1050')].value, #window center\n data[('0028','1051')].value, #window width\n data[('0028','1052')].value, #intercept\n data[('0028','1053')].value] #slope\n return [get_first_of_dicom_field_as_int(x) for x in dicom_fields]\n\n\ndef extract_multiwindow_image(data, windows=((40, 80), (40, 40), (60, 40))):\n image = data.pixel_array\n instance_num = int(get_instance_number(data))\n _ , _, intercept, slope = get_windowing(data)\n image_windowed = np.stack(\n [\n window_image(image, center, width, intercept, slope) \n for center, width in windows\n ], \n axis=2\n )\n return image_windowed, instance_num\n\n\ndef save_img(img_array, save_path, mode='RGB'):\n im = Image.fromarray(img_array).convert(mode)\n im.save(save_path)\n\n\ndef clean_dir(dir_path):\n for f in os.listdir(dir_path):\n p = os.path.join(dir_path, f)\n if os.path.isdir(p):\n shutil.rmtree(p)\n else:\n os.remove(p)\n","sub_path":"utils/dicom_utils.py","file_name":"dicom_utils.py","file_ext":"py","file_size_in_byte":5173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"647543642","text":"import binascii\nfrom cryptography import x509\nfrom cryptography import exceptions\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes\nimport hashlib\nimport math\nimport re\n\n\ndef pem_to_dict(pem_cert, sha1_string, encoding='utf-8'):\n try:\n # Deserialize the pem encoded certificate into a cryptography x509 object\n deserialized_cert = x509.load_pem_x509_certificate(pem_cert, default_backend())\n\n # Try to determine the SHA1, MD5 and SHA256 hashes of the certificate\n try:\n sha1 = binascii.hexlify(deserialized_cert.fingerprint(hashes.SHA1())).decode(encoding)\n md5 = binascii.hexlify(deserialized_cert.fingerprint(hashes.MD5())).decode(encoding)\n sha256 = binascii.hexlify(deserialized_cert.fingerprint(hashes.SHA256())).decode(encoding)\n except(AttributeError, TypeError, ValueError) as error:\n #print(\"Could not Hash i.e uniquely identify 
certificate\")\n sha1 = None\n md5 = None\n sha256 = None\n\n # create issuer dictionary\n try:\n #issuer_dict = deserialized_cert.issuer.rfc4514_string()\n # issuer_dict = dict(field.rfc4514_string().split(\"=\") for field in deserialized_cert.issuer)\n issuer_string = name_to_str(deserialized_cert.issuer)\n except (AttributeError, TypeError, ValueError) as error:\n #print('error parsing issuer object due to: %s' % error)\n issuer_string = {'error': str(error)}\n\n # create subject dictionary\n try:\n #subject_dict = dict(field.rfc4514_string().split(\"=\") for field in deserialized_cert.subject)\n subject_string = name_to_str(deserialized_cert.subject)\n except (AttributeError, TypeError, ValueError) as error:\n #print('error parsing subject object due to: %s' % error)\n subject_string = {'error': str(error)}\n\n # create extension dictionary\n try:\n ext_dict = extensions_to_dict(deserialized_cert.extensions)\n except (AttributeError, TypeError, ValueError) as error:\n #print('error parsing extension object due to: %s' % error)\n ext_dict = None\n except x509.DuplicateExtension as error:\n #print('error parsing extension object due to: %s' % error)\n ext_dict = {'error': 'DuplicateExtension'}\n except x509.UnsupportedGeneralNameType as error:\n #print('error parsing extension object due to: %s' % error)\n ext_dict = {'error': 'UnsupportedGeneralNameType'}\n except UnicodeError as error:\n #print('error parsing extension object due to: %s' % error)\n ext_dict = {'error': 'UnicodeError'}\n\n # creates validity time fields\n try:\n not_before = str(deserialized_cert.not_valid_before)\n except ValueError:\n not_before = None\n try:\n not_after = str(deserialized_cert.not_valid_after)\n except ValueError:\n not_after = None\n\n # Based on ENUM 1 = v1 2 = v3 should only by v3\n try:\n version = str(deserialized_cert.version)\n except:\n version = 'invalid'\n\n try:\n signature_algorithm = deserialized_cert.signature_algorithm_oid._name\n except ValueError:\n signature_algorithm = None\n\n # SHA1 Hash of the DER encoding of the subject (X509_NAME object)\n try:\n subject_name_hash = hashlib.md5(deserialized_cert.subject.public_bytes(default_backend())).hexdigest()\n #print(subject_name_hash)\n except ValueError:\n subject_name_hash = None\n\n try:\n serial_number = deserialized_cert.serial_number\n except ValueError:\n serial_number = None\n\n # features\n try:\n num_of_extensions = len(ext_dict)\n except (TypeError, ValueError, AttributeError):\n num_of_extensions = 0\n try:\n num_of_sub_elements = subject_string.count('=')\n except (TypeError, ValueError, AttributeError):\n num_of_sub_elements = 0\n try:\n num_of_issuer_elements = issuer_string.count('=')\n except (TypeError, ValueError, AttributeError):\n num_of_issuer_elements = 0\n try:\n len_of_extensions = len(str(ext_dict))\n except (TypeError, ValueError, AttributeError):\n len_of_extensions = 0\n try:\n len_of_issuer = len(issuer_string)\n except (TypeError, ValueError, AttributeError):\n len_of_issuer = 0\n try:\n len_of_subject = len(subject_string)\n except (TypeError, ValueError, AttributeError):\n len_of_subject = 0\n try:\n if subject_string.count('CN') < 1:\n raise TypeError\n p = re.compile('CN=.*')\n substring = re.search(p, subject_string).group()\n common_name = substring.split('=')\n shannon_entropy_subject_cn = shannon_entropy(common_name[1])\n except (TypeError, ValueError, AttributeError):\n shannon_entropy_subject_cn = 0\n\n features = {'num_of_extensions': num_of_extensions, 'num_of_sub_elements': 
num_of_sub_elements,\n 'num_of_issuer_elements': num_of_issuer_elements, 'len_of_extensions': len_of_extensions,\n 'len_of_issuer': len_of_issuer, 'len_of_subject': len_of_subject,\n 'shannon_entropy_subject_cn': shannon_entropy_subject_cn}\n\n # @TODO and parsing for ecc to pull out oid x,y,etc\n certificate = {'hash_id': sha1_string, 'md5': md5, 'sha1': sha1, 'sha256': sha256, 'issuer': issuer_string,\n 'subject': subject_string, 'extensions': ext_dict, 'not_valid_before': not_before,\n 'not_valid_after': not_after, 'version': version, 'signature_algorithm': signature_algorithm,\n 'subject_name_hash': subject_name_hash, 'serial_number': serial_number, 'features': features}\n\n try:\n bits = deserialized_cert.public_key().key_size\n certificate['bits'] = bits\n except (AttributeError, ValueError, TypeError) as error:\n certificate['bits'] = str(error)\n except NotImplementedError:\n certificate['bits'] = None\n certificate['unnamed_curve'] = 'possible_CVE-2020-0601_cert'\n except exceptions.UnsupportedAlgorithm as e:\n certificate['bits'] = None\n certificate['unsupported_curve'] = str(e).split(' ')[0]\n return certificate\n\n except (AttributeError, ValueError, TypeError) as error:\n print(\"Certificate Failed: %s\" % error)\n return {\"hash_id\": sha1_string, 'error': str(error)}\n except:\n print(\"Certificate Failed: Unspecified loading error\")\n return {\"hash_id\": sha1_string, 'error': 'Unspecified loading error'}\n\n\n# convert x509.name object to dictionary Note only works with python with python 3.4 latter\n# @TODO fix to work with earlier version of python (originally did with dict comprehension but could not handel errors)\ndef name_to_str(name_obj):\n return_str = ''\n\n first_iter = None\n for field in name_obj:\n try:\n key = get_rfc4514_name(field.oid._name)\n value = field._value\n\n if first_iter is None:\n return_str = (key + '=' + value)\n first_iter = 1\n else:\n return_str += (',' + key + '=' + value)\n\n except (AttributeError, TypeError, ValueError) as error:\n return_str += (',' + 'error' + '=' + str(error))\n except:\n print('Unknown Error')\n return_str += (',' + 'error' + '=' + 'UnknownError')\n\n return return_str\n\n\n# converts extensions to a dictionary\ndef extensions_to_dict(extensions_obj):\n return_dict = {}\n for ext in extensions_obj:\n try:\n key = ext.oid._name\n ext_values = vars(extensions_obj.get_extension_for_oid(ext.oid).value)\n corrected_values = fix_ext_values(ext_values)\n return_dict[key] = corrected_values\n except (AttributeError, TypeError, ValueError) as error:\n #('error parsing extensions due to: %s' % error)\n return_dict['error'] = str(error)\n return return_dict\n\n#def extensions_to_str()\n\n# fixes extension values to be more readable\n# @TODO fix to clean objects as well\ndef fix_ext_values(values, encoding='utf-8'):\n corrected_values = {}\n try:\n for item in values:\n key = item[1:]\n if isinstance(values[item], (bytes, bytearray)):\n corrected_values[key] = binascii.hexlify(values[item]).decode(encoding)\n else:\n corrected_values[key] = str(values[item])\n return corrected_values\n except (AttributeError, TypeError, ValueError) as error:\n #print('error parsing extension due to: %s' % error)\n return {'error': str(error)}\n\n\ndef shannon_entropy(string):\n \"\"\"Calculates the Shannon entropy of a string\"\"\"\n # get probability of chars in string\n prob = [float(string.count(c)) / len(string) for c in dict.fromkeys(list(string))]\n # calculate the entropy\n entropy = - sum([p * math.log(p) / math.log(2.0) for p in 
prob])\n return entropy\n\n\n# Lookup the correct name\ndef get_rfc4514_name(name):\n name = name.lower()\n name = ''.join(e for e in name if e.isalnum())\n lookup = {\n 'countryname': 'C',\n 'country': 'C',\n 'localityname': 'L',\n 'locality': 'L',\n 'statename': 'ST',\n 'state': 'ST',\n 'provincename': 'ST',\n 'province': 'ST',\n 'stateorprovincename': 'ST',\n 'organizationname': 'O',\n 'organization': 'O',\n 'organizationalunit': 'OU',\n 'organizationalunitname': 'OU',\n 'commonname': 'CN',\n 'common': 'CN',\n 'streetaddress': 'STREET',\n 'domaincomponent': 'DC',\n 'userid': 'UID'\n }\n if name in lookup:\n return lookup[name]\n else:\n return name\n","sub_path":"Transform.py","file_name":"Transform.py","file_ext":"py","file_size_in_byte":10168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"15867763","text":"__author__ = 'user'\n\nfrom tornado.web import UIModule\n\nclass settings(UIModule):\n def render(self):\n d = {\n 'header_color': '#35D056',\n 'profile_color': '#35B056',\n 'profile_icon_color': '#35D056',\n 'menu_activated_color': '#35B056',\n 'menu_hover_color': '#35D056',\n 'footer_color': '#35D056',\n }\n return self.render_string('_ui_modules/settings.html', admin_setting=d)","sub_path":"AdminSystem/ui_modules/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"561417545","text":"# Copyright (c) 2018, Novo Nordisk Foundation Center for Biosustainability,\n# Technical University of Denmark.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Implement RESTful API endpoints using resources.\"\"\"\n\nimport logging\nimport warnings\n\nfrom flask import abort, g, make_response\nfrom flask_apispec import FlaskApiSpec, MethodResource, marshal_with, use_kwargs\nfrom sqlalchemy.orm import load_only\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom .jwt import jwt_require_claim, jwt_required\nfrom .models import Model, db\nfrom .schemas import Model as ModelSchema\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef init_app(app):\n \"\"\"Register API resources on the provided Flask application.\"\"\"\n\n def register(path, resource):\n app.add_url_rule(path, view_func=resource.as_view(resource.__name__))\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n docs.register(resource, endpoint=resource.__name__)\n\n docs = FlaskApiSpec(app)\n register(\"/models\", Models)\n register(\"/models/\", IndvModel)\n\n\nclass Models(MethodResource):\n \"\"\"Serve all available models or create new entries.\"\"\"\n\n @marshal_with(ModelSchema(many=True, exclude=(\"model_serialized\",)), 200)\n def get(self):\n \"\"\"List all available models.\"\"\"\n logger.debug(\"Retrieving all models\")\n return (\n Model.query.options(\n load_only(\n Model.id,\n Model.name,\n Model.organism_id,\n Model.project_id,\n Model.preferred_map_id,\n Model.default_biomass_reaction,\n Model.ec_model,\n )\n 
)\n .filter(\n Model.project_id.in_(g.jwt_claims[\"prj\"])\n | Model.project_id.is_(None)\n )\n .all()\n )\n\n @use_kwargs(ModelSchema(exclude=(\"id\",)))\n @marshal_with(ModelSchema(only=(\"id\",)), code=201)\n @jwt_required\n def post(self, **payload):\n \"\"\"Create a new model.\"\"\"\n logger.debug(\"Creating a new model in the model storage\")\n if \"project_id\" in payload:\n jwt_require_claim(payload[\"project_id\"], \"write\")\n new_model = Model(**payload)\n db.session.add(new_model)\n db.session.commit()\n return new_model, 201\n\n\nclass IndvModel(MethodResource):\n \"\"\"Retrieve, update or delete a single model.\"\"\"\n\n @marshal_with(ModelSchema, code=200)\n @marshal_with(None, code=404)\n def get(self, id):\n \"\"\"Return a model by ID.\"\"\"\n logger.debug(f\"Fetching model by ID {id}.\")\n try:\n return (\n Model.query.filter(Model.id == id)\n .filter(\n Model.project_id.in_(g.jwt_claims[\"prj\"])\n | Model.project_id.is_(None)\n )\n .one()\n )\n except NoResultFound:\n abort(404, f\"Cannot find any model with ID {id}.\")\n\n @use_kwargs(ModelSchema(exclude=(\"id\",), partial=True))\n @marshal_with(None, code=204)\n @marshal_with(None, code=404)\n @jwt_required\n def put(self, id, **payload):\n \"\"\"Update a model by ID.\"\"\"\n logger.debug(f\"Updating model with ID {id}.\")\n try:\n model = Model.query.filter(Model.id == id).one()\n except NoResultFound:\n abort(404, f\"Cannot find any model with ID {id}.\")\n jwt_require_claim(model.project_id, \"write\")\n for key, value in payload.items():\n setattr(model, key, value)\n db.session.commit()\n return make_response(\"\", 204)\n\n @marshal_with(None, code=204)\n @marshal_with(None, code=404)\n @jwt_required\n def delete(self, id):\n \"\"\"Delete a model by ID.\"\"\"\n logger.debug(f\"Deleting model with ID {id}.\")\n try:\n model = Model.query.filter(Model.id == id).one()\n except NoResultFound:\n abort(404, f\"Cannot find any model with ID {id}.\")\n jwt_require_claim(model.project_id, \"admin\")\n db.session.delete(model)\n db.session.commit()\n return make_response(\"\", 204)\n","sub_path":"src/model_storage/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":4674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"642173694","text":"'''\r\nThis code is written by Edwin, Chien on 2020/03/12.\r\n3.P & T Company[Python]\r\n'''\r\n# import all of the API from PuLP.\r\nfrom pulp import *\r\n\r\n# Initial Problem\r\nprob = LpProblem(\"P & T Company(Transportation problem)\", LpMinimize)\r\n\r\n# Initial Parameters\r\ndemand = [80, 65, 70, 85]\r\ncost = [464,513,654,867,\r\n 352,416,690,791,\r\n 995,682,388,685]\r\nAvail = [75,125,100]\r\n\r\n# Initial Variables => x_CW, C = 3, W = 4\r\nall_loc = []\r\nfor C in range(len(Avail)):\r\n for W in range(len(demand)):\r\n all_loc.append(str(C+1)+str(W+1))\r\n\r\nx = LpVariable.dicts(\"x\", [loc for loc in all_loc], lowBound=0)\r\n\r\n# Objective Function\r\nprob += lpSum( [x[i] * cost[all_loc.index(i)] for i in all_loc] )\r\n\r\n# Constraint\r\n# For demand\r\nfor _ in range(len(demand)):\r\n prob += lpSum( [x[i] for i in all_loc if i[1]==str(_+1)] ) == demand[_]\r\n# For avail\r\nfor _ in range(len(Avail)):\r\n prob += lpSum( [x[i] for i in all_loc if i[0]==str(_+1)] ) == Avail[_]\r\n\r\n# Problem Solve & Check\r\nstatus = prob.solve()\r\n\r\n# Show Result\r\nprint(\"Operation status = {}\\n\".format(LpStatus[status]))\r\nprint(\"Optimal_value = {}\\n\".format(value(prob.objective)))\r\nfor i in all_loc:\r\n print('The value of {} is {}.'.format(x[i], x[i].varValue))
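\r\n\r\n# Added explanatory sketch: each decision variable x_CW is the amount shipped\r\n# from cannery C to warehouse W, e.g. x['23'] is the shipment from cannery 2\r\n# to warehouse 3.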
{}\\n\".format(value(prob.objective)))\r\nfor i in all_loc:\r\n print('The value of {} is {}.'.format(x[i], x[i].varValue))","sub_path":"3.P & T Company/3.P & T Company.py","file_name":"3.P & T Company.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"620906219","text":"from pprint import pprint\nfrom .db import sync_db_connect\n\nclass_dict = {\n '0': 'sales',\n '1': 'contractor',\n '2': 'promo',\n '3': 'nletter',\n '4': 'partner',\n '5': 'notification',\n '6': 'personal',\n '7': 'skip',\n '8': 'count',\n '9': 'exit'\n}\nclasses = ['sales', 'contractor', 'promo', 'nletter', 'partner', 'notification', 'personal', '1-skip', '2-count', '3-exit']\ndb = sync_db_connect()\n\n\ndef get_next_message():\n message = db['raw_message'].find_one({'label' : None, 'skip': None})\n return message\n\ndef skip_message(msg):\n res = db['raw_message'].update_one({'_id' : msg['_id']}, {'$set' : {'skip' : 1}})\n print(f\"..skipped: {res}\")\n\ndef classify_message(msg, label):\n res = db['raw_message'].update_one({'_id' : msg['_id']}, {'$set' : {'label' : label}})\n print(f\"..labeled: {res}\")\n\ndef count_labels():\n res = db['raw_message'].aggregate([\n {\"$match\" : {'label' : {\"$ne\" : None}}},\n {\"$group\" : {'_id':\"$label\", 'count':{'$sum':1}}}])\n return list(res)\n\n\nif __name__ == '__main__':\n while(True):\n try:\n message = get_next_message()\n\n print(\".....\\n\\n\\n\")\n print(f\"_id: {message['_id']}\")\n print(f\"from: {message['from']}\")\n print(f\"to: {message['to']}\")\n print(f\"subject: {message['subject']}\")\n print(f\"snippet: {message['snippet']}\")\n print(\".....\")\n\n pprint(class_dict)\n q=str(input(\"..what's the class:\"))\n if class_dict[q] == 'skip':\n skip_message(message)\n print('skipped')\n\n continue\n elif class_dict[q] == 'count':\n res = count_labels()\n pprint(res)\n\n continue\n elif class_dict[q] == 'exit':\n exit(0)\n elif class_dict[q] in classes:\n classify_message(message, class_dict[q])\n\n except Exception as e:\n print(f\"!!!!ERROR: {str(e)}\")\n continue","sub_path":"categorizer/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"140702863","text":"# coding:utf-8\n# @Time :2020/10/28 3:22 下午\n# @File :fib_cycle.py\n\n'''\n斐波那契数列F0=0(第0项),F1=1(第1项),...,Fn = F(n-1) + F(n-2)(第n项),\n目标是输出第0至n项的序列,即F0——Fn,故一共输出n+1个数。\n'''\n\n# 使用循环方法生成斐波那契数列\ndef fib_cycle(n): # n表示生成到第0-n项斐波那契数列,即生成前n+1个数字\n cycle_result = [] # 创建一个列表用来存储该数列\n f0, f1 = 0,1 # 保存前两个数字,第0项和第1项初始化\n # 满足循环条件,就继续下面的运算,如果n<=0,即默认输出f0=0\n while n > 0:\n cycle_result.append(f0)\n f0, f1 = f1, f0 + f1\n n -= 1 #循环次数逐渐减少\n cycle_result.append(f0)\n return cycle_result\n\nif __name__ == \"__main__\":\n n = int(input(\"Please enter an integer:\"))\n fib = fib_cycle(n)\n print(fib)\n\n","sub_path":"Fibonacci_sequence/fib_cycle.py","file_name":"fib_cycle.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"7552047","text":"from .__version__ import *\nfrom .chains import *\nfrom .colors import *\nfrom .console import *\nfrom .converters import *\nfrom .decorators import *\n\n\n\n\n__name__ = 'PythonDebugTools'\n__author__ = \"Tyler Stegmaier\"\n__email__ = \"tyler.stegmaier.510@gmail.com\"\n__copyright__ = \"Copyright 2020\"\n__credits__ = [\n \"Copyright (c) 2020 Tyler 
Stegmaier\",\n ]\n__license__ = \"GPL 3.0\"\n__version__ = version\n__maintainer__ = __author__\n__maintainer_email__ = __email__\n\n# How mature is this project? Common values are\n# 3 - Alpha\n# 4 - Beta\n# 5 - Production/Stable\n__status__ = 'Development Status :: 4 - Beta'\n\n__url__ = fr'https://github.com/Jakar510/{__name__}'\n# download_url=f'https://github.com/Jakar510/PyDebug/TkinterExtensions/releases/tag/{version}'\n__classifiers__ = [\n __status__,\n\n # Indicate who your project is intended for\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Build Tools',\n\n # Pick your license as you wish\n 'License :: Free To Use But Restricted',\n\n # Support platforms\n 'Operating System :: MacOS',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n\n 'Programming Language :: Python :: 3',\n ]\n\n__short_description__ = 'A set of helpers for debugging Python 3.x.'\n","sub_path":"venv/Lib/site-packages/PythonDebugTools/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"544656802","text":"import numpy as np\nimport cv2\n\ncap = cv2.VideoCapture('test.mp4')\n\n# Define the codec and create VideoWriter object\n#fourcc = cv2.cv.CV_FOURCC(*'DIVX')\n#out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\n\n\nout = cv2.VideoWriter('output.avi', fourcc, 20.0,\n (int(cap.get(3)), int(cap.get(4))), isColor=False)\nbackSub = cv2.createBackgroundSubtractorKNN()\nwhile(cap.isOpened()):\n ret, frame = cap.read()\n if ret == True:\n frame = cv2.flip(frame, 0)\n fgMask = backSub.apply(frame)\n # write the flipped frame\n out.write(fgMask)\n\n cv2.imshow('frame', fgMask)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n break\n\n# Release everything if job is finished\ncap.release()\nout.release()\ncv2.destroyAllWindows()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"421346647","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 3 16:05:59 2018\n\n@author: Bounab Yazid\n\"\"\"\nimport re\nimport os\nimport heapq \n\nfrom os import listdir\nfrom os.path import isfile, join\n\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import sent_tokenize\nfrom nltk import pos_tag, word_tokenize\n\nfrom nltk.tag import StanfordNERTagger\n\nimport collections\nimport pandas as pd\n\nfrom gensim.summarization.summarizer import summarize\n\n#https://summari.es/\n#https://towardsdatascience.com/very-simple-python-script-for-extracting-most-common-words-from-a-story-1e3570d0b9d0\n#https://medium.com/agatha-codes/using-textual-analysis-to-quantify-a-cast-of-characters-4f3baecdb5c\n\n#_______________________________________________________________\n\ndef TextFile_To_Sentences(TextFile):\n with open(TextFile, encoding=\"ISO-8859-1\") as f:\n text = f.read()\n sentences = sent_tokenize(text)\n return sentences\n#_______________________________________________________________\n\ndef Read_TextFile(TextFile):\n with open(TextFile, encoding=\"ISO-8859-1\") as f:\n Text = f.read()\n f.close()\n #print (Text)\n punct = re.sub('[A-Za-z]|[0-9]|[\\n\\t]','',Text)\n SymbList = list(dict.fromkeys(punct).keys())\n #print (SymbList)\n return Text,SymbList\n#_______________________________________________________________\n 
\ndef Preprocessing_Text(Text,punct):\n    Text = re.sub('[0-9]+', ' ', Text)\n    Text = Text.replace('é','e')\n    for p in punct:\n        Text = Text.replace(p,' ')\n    \n    Text = re.sub(' +',' ', Text)\n    Text = Text.strip()\n    #Text = Text.lower()\n    #print (Text)\n    return Text\n\n#_______________________________________________________________\n\ndef Term_Frequecy(Text,punct):\n    word_frequencies = {}\n    Text = Preprocessing_Text(Text,punct)\n    for word in Text.lower().split():\n        if word not in stopwords.words('english'):\n            if word not in word_frequencies:\n                word_frequencies[word] = 1\n            else:\n                word_frequencies[word] += 1\n    return word_frequencies\n    \n#_______________________________________________________________\n\ndef Text_tokenize(Text):\n    Tokens = word_tokenize(Text)\n    return Tokens\n\ndef Tagging(Text):\n    Tagged_Text = pos_tag(Text_tokenize(Text))\n    return Tagged_Text\n#_______________________________________________________________\n\ndef NE_Tagger(Text):\n    st = StanfordNERTagger('/home/polo/Downloads/stanford-ner-2018-02-27/classifiers/english.all.3class.distsim.crf.ser.gz',\n\t\t\t\t\t   '/home/polo/Downloads/stanford-ner-2018-02-27/stanford-ner.jar', encoding='utf-8')\n\n    tokenized_text = word_tokenize(Text)\n    classified_text = st.tag(tokenized_text)\n\n    #print(classified_text)\n    return classified_text\n#_______________________________________________________________\n    \ndef find_proper_nouns(Tagged_Text):\n    proper_nouns = []\n    i = 0\n    while i < len(Tagged_Text):\n        if Tagged_Text[i][1] == 'NNP':\n            # guard i+1 so an NNP at the end of the list cannot raise an IndexError\n            if i+1 < len(Tagged_Text) and Tagged_Text[i+1][1] == 'NNP':\n                proper_nouns.append(Tagged_Text[i][0].lower()+\" \" +Tagged_Text[i+1][0].lower())\n                i+=1\n            else:\n                proper_nouns.append(Tagged_Text[i][0].lower())\n        i+=1\n    return proper_nouns\n\n#_______________________________________________________________\n    \ndef MainCharacter(Text,n_print,word_frequencies):\n    # NER tags are computed here but not yet used to restrict the count to PERSON entities\n    NER_Text = [(x.lower(), y) for x,y in NE_Tagger(Text)]\n    NER_Text = dict(NER_Text)\n    \n    word_counter = collections.Counter(word_frequencies)\n    MainChar = max(word_counter, key=word_counter.get)\n    #print('Main Character :',MainChar)\n    return MainChar\n\n#_______________________________________________________________\n\ndef SentsMainChar(sentences,MainChar):\n    # TODO: run SRL first, then collect the sentences that contain the main character\n    MainSents = []\n    for sent in sentences:\n        if MainChar in sent.lower():\n            MainSents.append(sent)\n    return MainSents\n    \n#_______________________________________________________________\n\ndef MostCommon(n_print,Text,word_frequencies):\n\n    NER_Text = [(x.lower(), y) for x,y in NE_Tagger(Text)]\n    NER_Text = dict(NER_Text)\n    #print (NER_Text)\n    \n    print(\"\\nOK. The {} most common words are as follows\\n\".format(n_print))\n    word_counter = collections.Counter(word_frequencies)\n    #print(word_counter)\n    for word, count in word_counter.most_common(n_print):\n        #print(word, \": \", count, \": \",Tagged_Text[word])\n        # use .get: not every counted word is guaranteed to appear in the NER dict\n        print(word, \": \", count, \": \",NER_Text.get(word, 'O'))\n    \n    print('Most Common Term ',max(word_counter, key=word_counter.get))\n    \n    \n#_______________________________________________________________\n
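# Editor's note (illustrative): collections.Counter does the heavy lifting in
# MostCommon and DrawMostCommon. For example:
#     c = collections.Counter({'alan': 12, 'turing': 9, 'machine': 4})
#     c.most_common(2)    -> [('alan', 12), ('turing', 9)]
#     max(c, key=c.get)   -> 'alan'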
\ndef DrawMostCommon(n_print,word_frequencies):\n    word_counter = collections.Counter(word_frequencies)\n    lst = word_counter.most_common(n_print)\n    df = pd.DataFrame(lst, columns = ['Word', 'Count'])\n    df.plot.bar(x='Word',y='Count')\n\n#_______________________________________________________________\n\ndef Simplified_Sentence(Sentence):\n    Simple_Sent = Sentence\n    #_____________________parentheticals________________________\n    Simple_Sent = re.sub('\".*?\"', '', Simple_Sent) # remove \"....\"\n    Simple_Sent = re.sub('\\(.*?\\)', '', Simple_Sent) # remove (...)\n    \n    #Relative_Clause(Sentence)\n    #non-restrictive\n    #restrictive appositive phrases \n    #participial phrases offset by commas \n\n    #adjective and adverb phrases delimited by punctuation \n    #particular prepositional phrases \n    #lead noun phrases \n    #intra-sentential attributions \n    #___________________________________________________________\n    return Simple_Sent\n#_______________________________________________________________\n\ndef Simplified_Sentences(Sentences):\n    Simple_Sents = []\n    for Sentence in Sentences:\n        #print (Sentence)\n        Simplified_Sent = Simplified_Sentence(Sentence)\n        Simple_Sents.append(Simplified_Sent)\n    return Simple_Sents\n\n#_______________________________________________________________\n\ndef Summarize_Story0():\n    Text,punct = Read_TextFile('Alan Turing.txt')\n    Processed_Text = Preprocessing_Text(Text,punct)\n\n    # keep the frequency table: the helpers below all expect it as an argument\n    word_frequencies = Term_Frequecy(Processed_Text,punct)\n    n_print = int(input(\"How many most common words to print: \"))\n    DrawMostCommon(n_print,word_frequencies)\n\n    print('____________________________________________________')\n\n    MostCommon(n_print,Text,word_frequencies)\n    MainChar = MainCharacter(Text,n_print,word_frequencies)\n\n    sentences = TextFile_To_Sentences('Alan Turing.txt')\n    #print (\"\\n\".join(sentences))\n    print('____________________________________________________')\n    MainSents = Simplified_Sentences(SentsMainChar(sentences,MainChar))\n    print (\"\\n.............\\n\".join(MainSents))\n    print('____________________________________________________')\n    print('Number of Sentences Containing Main Character ',MainChar,' = ', len(MainSents))\n    #proper_nouns = find_proper_nouns(Tagged_Text)\n    #print (summarize_text(proper_nouns, 10))\n    print('____________________________________________________')\n    with open(\"MSummary Alan Turing.txt\", \"w\") as output:\n        output.write(\"\".join(MainSents))\n\n#_______________________________________________________________\n\ndef Summarize_Story(filename,n_print):\n    Text,punct = Read_TextFile(filename)\n    Processed_Text = Preprocessing_Text(Text,punct)\n\n    word_frequencies = Term_Frequecy(Processed_Text,punct)\n    \n    MainChar = MainCharacter(Text,n_print,word_frequencies)\n\n    sentences = TextFile_To_Sentences(filename)\n\n    MainSents = SentsMainChar(sentences,MainChar)\n    MainSents = Simplified_Sentences(MainSents)\n    \n    return MainSents\n#_______________________________________________________________\n
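# Editor's note (illustrative): the ranked summariser below selects sentences
# with heapq.nlargest, which returns the keys with the largest scores,
# highest first:
#     scores = {'s1': 0.9, 's2': 0.3, 's3': 1.4}
#     heapq.nlargest(2, scores, key=scores.get)  -> ['s3', 's1']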
\ndef Summarize_Ranked_Sentences(filename):\n    word_frequencies = {}\n    sentence_scores = {}\n\n    Text,punct = Read_TextFile(filename)\n    Processed_Text = Preprocessing_Text(Text,punct)\n\n    word_frequencies = Term_Frequecy(Processed_Text,punct)\n    \n    sentences = TextFile_To_Sentences(filename)\n\n    # normalise the raw counts to [0, 1] in place; the old alias\n    # 'Weighted_frequencies = word_frequencies' only pointed at this same dict\n    maximum_frequency = max(word_frequencies.values())\n    for word in word_frequencies.keys():\n        word_frequencies[word] = (word_frequencies[word]/maximum_frequency)\n    \n    for sent in sentences:\n        for word in word_tokenize(sent.lower()):\n            if word in word_frequencies.keys():\n                if len(sent.split(' ')) < 30:\n                    if sent not in sentence_scores.keys():\n                        sentence_scores[sent] = word_frequencies[word]\n                    else:\n                        sentence_scores[sent] += word_frequencies[word]\n    \n    # keep the 7 highest-scoring sentences\n    summary_sentences = heapq.nlargest(7, sentence_scores, key=sentence_scores.get)\n    summary_sentences = Simplified_Sentences(summary_sentences)\n    #summary = ' '.join(summary_sentences)\n    #print(summary)\n    return summary_sentences\n    \n#_______________________________________________________________\n\ndef Read_BBC_News_Summary():\n    News_Articles = '/home/polo/.config/spyder-py3/Co-referece/BBC News Summary/News Articles'\n    #Summaries = '/home/polo/.config/spyder-py3/Co-referece/BBC News Summary/Summaries'\n    Machine_Summary = '/home/polo/.config/spyder-py3/Co-referece/BBC News Summary/Machine Summary'\n    \n    SubDirectories = os.listdir(News_Articles)\n    \n    try: \n        os.mkdir(Machine_Summary)\n    except OSError: \n        print (\"Creation of the directory %s failed\" % Machine_Summary)\n    for subdir in os.listdir(News_Articles):\n        try: \n            os.mkdir(Machine_Summary+'/'+subdir)\n        except OSError: \n            print (\"Creation of the directory %s failed\" % subdir)\n\n    print (SubDirectories)\n    \n    #n_print = int(input(\"How many most common words to print: \"))\n    \n    for subdir in SubDirectories:\n        files = [f for f in listdir(News_Articles+'/'+subdir) if isfile(join(News_Articles+'/'+subdir, f))] \n        for f in files:\n            print(News_Articles+'/'+subdir+'/'+f)\n            MainSents = Summarize_Ranked_Sentences(News_Articles+'/'+subdir+'/'+f)\n            #MainSents = Summarize_Story(News_Articles+'/'+subdir+'/'+f,n_print)\n            with open(Machine_Summary+'/'+subdir+'/'+f, \"w\") as output:\n                output.write(\"\".join(MainSents))\n    \n    print('____________________________________________________')\n\n#_______________________________________________________________\n
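# Editor's sketch (hypothetical refactor, not in the original file): the BBC
# walker above and the storytelling walker below duplicate the same
# mkdir-and-walk logic; one parameterised helper could serve both. The two
# root paths are passed in by the caller.
def summarize_corpus(corpus_root, summary_root):
    for subdir in os.listdir(corpus_root):
        # exist_ok avoids the try/except around os.mkdir used above
        os.makedirs(join(summary_root, subdir), exist_ok=True)
        for f in listdir(join(corpus_root, subdir)):
            src = join(corpus_root, subdir, f)
            if not isfile(src):
                continue
            sents = Summarize_Ranked_Sentences(src)
            with open(join(summary_root, subdir, f), "w") as output:
                output.write("".join(sents))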
\ndef Read_StoryTelling_Summary():\n    Stories = '/home/polo/.config/spyder-py3/Co-referece/Fairy tales/Storynory'\n    #Summaries = '/home/polo/.config/spyder-py3/Co-referece/BBC News Summary/Summaries'\n    Machine_Summary = '/home/polo/.config/spyder-py3/Co-referece/Fairy tales/Machine Summary'\n    \n    SubDirectories = os.listdir(Stories)\n    \n    try: \n        os.mkdir(Machine_Summary)\n    except OSError: \n        print (\"Creation of the directory %s failed\" % Machine_Summary)\n    for subdir in os.listdir(Stories):\n        try: \n            os.mkdir(Machine_Summary+'/'+subdir)\n        except OSError: \n            print (\"Creation of the directory %s failed\" % subdir)\n\n    print (SubDirectories)\n    \n    #n_print = int(input(\"How many most common words to print: \"))\n    \n    for subdir in SubDirectories:\n        files = [f for f in listdir(Stories+'/'+subdir) if isfile(join(Stories+'/'+subdir, f))] \n        for f in files:\n            print(Stories+'/'+subdir+'/'+f)\n            MainSents = Summarize_Ranked_Sentences(Stories+'/'+subdir+'/'+f)\n            #MainSents = Summarize_Story(News_Articles+'/'+subdir+'/'+f,n_print)\n            Text,punct = Read_TextFile(Stories+'/'+subdir+'/'+f)\n            with open(Machine_Summary+'/'+subdir+'/'+f, \"w\") as output:\n                output.write(\"\".join(MainSents))\n    \n            with open(Machine_Summary+'/'+subdir+'/TextRank.txt', \"w\") as output:\n                output.write(\"\".join(summarize(Text)))\n    print('____________________________________________________')\n    \n#_______________________________________________________________\n\n#Summarize_Story(filename,15)\n    \n#Read_BBC_News_Summary()\nRead_StoryTelling_Summary()\n\n#print(Summarize_Ranked_Sentences('Alan Turing.txt'))\n\n#print(Summarize_Ranked_Sentences('/home/polo/.config/spyder-py3/Co-referece/Fairy tales/Storynory/Hansel and Gretel/Hansel and Gretel.txt'))","sub_path":"Summarization.py","file_name":"Summarization.py","file_ext":"py","file_size_in_byte":12328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"503213770","text":"import os\r\nimport pyttsx3 \r\n\r\nprint(\"Hello Mridu, I am Ron! your chat Bot\")\r\npyttsx3.speak(\"Hello Mridu, I am Ron! your chat Bot\")\r\n\r\n \r\nwhile(True):\r\n    print()\r\n\t\r\n    print(\"Which service would you like to use: \\t\" ,end='')\r\n    pyttsx3.speak(\"Which service would you like to use\")\r\n\t\r\n    p=input()\r\n\r\n    \r\n    if (\"chrome\" in p):\r\n        os.system(\"chrome\")\r\n        \r\n        \r\n    elif (\"quick access\" in p):\r\n        os.system(\"start menu\")\r\n        \r\n    elif (\"editor\" in p) or (\"notepad++\" in p):\r\n        os.system(\"notepad++\")\r\n        \r\n    elif (\"linkedin\" in p):\r\n        os.system(\"chrome https://www.linkedin.com/in/mridula-gaba-46913215a/\")\r\n        \r\n    elif (\"gmail\" in p):\r\n        os.system(\"chrome www.gmail.com\")\r\n        \r\n    elif (\"exit\" in p) or (\"close\" in p):\r\n        print(\"It seems you want to exit! Happy to help you! Good Bye!\")\r\n        pyttsx3.speak(\"It seems you want to exit! Happy to help you! Good Bye!\")\r\n        break\r\n        \r\n    else:\r\n        print(\"I am sorry, this service is not available\")\r\n        pyttsx3.speak(\"I am sorry, this service is not available\")","sub_path":"mypython.py","file_name":"mypython.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
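# Editor's sketch (hypothetical, relating to the mypython.py record above):
# its if/elif chain maps spoken keywords to shell commands; a dispatch table
# keeps that mapping data-driven. The command strings repeat the same
# assumptions the original makes about what is on PATH.
import os

COMMANDS = {
    'chrome': 'chrome',
    'linkedin': 'chrome https://www.linkedin.com/in/mridula-gaba-46913215a/',
    'gmail': 'chrome www.gmail.com',
}

def dispatch(request, commands=COMMANDS):
    # launch the first command whose keyword appears in the request
    for keyword, command in commands.items():
        if keyword in request:
            os.system(command)
            return True
    return False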
+{"seq_id":"488147479","text":"import copy\nfrom queue import Queue\n\ninp = 1358\nsize = 100\nmaze = [None]*size\n\n\ndef printMaze():\n    global maze\n    global size\n    for x in range(size):\n        row = ''.join(maze[x])\n        print(row)\n\ndef neighbors(point):\n    to = [point.y,point.x]\n    global maze\n    global size\n    retn = []\n    if to[0]+1 < size and maze[to[0]+1][to[1]] == '.':\n        down = copy.copy(to)\n        down[0] += 1\n        retn.append(down)\n    if to[1]+1 < size and maze[to[0]][to[1]+1] == '.':\n        right = copy.copy(to)\n        right[1] += 1\n        retn.append(right)\n    if to[0]-1 >= 0 and maze[to[0]-1][to[1]] == '.':\n        up = copy.copy(to)\n        up[0] -= 1\n        retn.append(up)\n    if to[1]-1 >= 0 and maze[to[0]][to[1]-1] == '.':\n        left = copy.copy(to)\n        left[1] -= 1\n        retn.append(left)\n    retn = [myTile(r[0],r[1]) for r in retn]\n    return retn\n\nfor y in range(size):\n    maze[y] = [' ']*size\n    for x in range(size):\n        tile = x*x + 3*x + 2*x*y + y + y*y + inp\n        tile = bin(tile).count(\"1\")\n        if tile % 2 == 0:\n            maze[y][x] = '.'\n        else:\n            maze[y][x] = '#'\nclass myTile(object):\n    def __init__(self,y=0,x=0,frm = None):\n        self.x = x\n        self.y = y\n        self.frm = frm\n    def __eq__(self,other):\n        if not isinstance(other, myTile):\n            return False\n        if self.x != other.x or self.y != other.y:\n            return False\n        return True\n    def __hash__(self):\n        return hash((self.y,self.x))\n\nstart = myTile(1,1)\nend = [39,31]\npath =[]\n\nfrontier = Queue()\nfrontier.put(start)\nvisited = []\nvisited.append(start)\nwhile not frontier.empty():\n    current = frontier.get()\n    # print(current)  # debug: printed one bare object repr per visited tile\n    for nxt in neighbors(current):\n        if nxt not in visited:\n            frontier.put(nxt)\n            nxt.frm = current\n            visited.append(nxt)\n\n\ntemp = visited[visited.index(myTile(39,31))]\nstep = 0\nwhile temp.frm is not None:\n    temp = temp.frm\n    step += 1\nfor tile in visited:\n    maze[tile.y][tile.x] = 'O'\nprintMaze()\nprint(step)\n\n    \n","sub_path":"day13/day13p1.py","file_name":"day13p1.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"257740910","text":"def exam16():\n    t=int(input())\n    for T in range(t):\n        n=int(input())\n        if n==1 :\n            print(3)\n        elif n==2:\n            print(8)\n        elif n==3:\n            print(19)\n        else:\n            sum=0\n            for i in range(2,n):\n                sum=sum+i*(i-1)//2\n            print(1+(n-1)*n+3*sum)\nexam16()\n","sub_path":"Code/CodeRecords/2679/60586/290230.py","file_name":"290230.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"550849984","text":"class Solution:\n    def remove_duplicate(self, arr, N):\n\n        k=1\n        i=0\n        j=1\n\n        while(j